Dataset columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 distinct values), file_path (string, lengths 5 to 99)
/* * This file implement the Wireless Extensions spy API. * * Authors : Jean Tourrilhes - HPL - <[email protected]> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * * (As all part of the Linux kernel, this file is GPL) */ #include <linux/wireless.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <net/iw_handler.h> #include <net/arp.h> #include <net/wext.h> static inline struct iw_spy_data *get_spydata(struct net_device *dev) { /* This is the new way */ if (dev->wireless_data) return dev->wireless_data->spy_data; return NULL; } int iw_handler_set_spy(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct sockaddr * address = (struct sockaddr *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Disable spy collection while we copy the addresses. * While we copy addresses, any call to wireless_spy_update() * will NOP. This is OK, as anyway the addresses are changing. */ spydata->spy_number = 0; /* We want to operate without locking, because wireless_spy_update() * most likely will happen in the interrupt handler, and therefore * have its own locking constraints and needs performance. * The rtnl_lock() make sure we don't race with the other iw_handlers. * This make sure wireless_spy_update() "see" that the spy list * is temporarily disabled. */ smp_wmb(); /* Are there are addresses to copy? */ if (wrqu->data.length > 0) { int i; /* Copy addresses */ for (i = 0; i < wrqu->data.length; i++) memcpy(spydata->spy_address[i], address[i].sa_data, ETH_ALEN); /* Reset stats */ memset(spydata->spy_stat, 0, sizeof(struct iw_quality) * IW_MAX_SPY); } /* Make sure above is updated before re-enabling */ smp_wmb(); /* Enable addresses */ spydata->spy_number = wrqu->data.length; return 0; } EXPORT_SYMBOL(iw_handler_set_spy); int iw_handler_get_spy(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct sockaddr * address = (struct sockaddr *) extra; int i; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; wrqu->data.length = spydata->spy_number; /* Copy addresses. */ for (i = 0; i < spydata->spy_number; i++) { memcpy(address[i].sa_data, spydata->spy_address[i], ETH_ALEN); address[i].sa_family = AF_UNIX; } /* Copy stats to the user buffer (just after). */ if (spydata->spy_number > 0) memcpy(extra + (sizeof(struct sockaddr) *spydata->spy_number), spydata->spy_stat, sizeof(struct iw_quality) * spydata->spy_number); /* Reset updated flags. 
*/ for (i = 0; i < spydata->spy_number; i++) spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED; return 0; } EXPORT_SYMBOL(iw_handler_get_spy); /*------------------------------------------------------------------*/ /* * Standard Wireless Handler : set spy threshold */ int iw_handler_set_thrspy(struct net_device * dev, struct iw_request_info *info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct iw_thrspy * threshold = (struct iw_thrspy *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Just do it */ spydata->spy_thr_low = threshold->low; spydata->spy_thr_high = threshold->high; /* Clear flag */ memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); return 0; } EXPORT_SYMBOL(iw_handler_set_thrspy); /*------------------------------------------------------------------*/ /* * Standard Wireless Handler : get spy threshold */ int iw_handler_get_thrspy(struct net_device * dev, struct iw_request_info *info, union iwreq_data * wrqu, char * extra) { struct iw_spy_data * spydata = get_spydata(dev); struct iw_thrspy * threshold = (struct iw_thrspy *) extra; /* Make sure driver is not buggy or using the old API */ if (!spydata) return -EOPNOTSUPP; /* Just do it */ threshold->low = spydata->spy_thr_low; threshold->high = spydata->spy_thr_high; return 0; } EXPORT_SYMBOL(iw_handler_get_thrspy); /*------------------------------------------------------------------*/ /* * Prepare and send a Spy Threshold event */ static void iw_send_thrspy_event(struct net_device * dev, struct iw_spy_data * spydata, unsigned char * address, struct iw_quality * wstats) { union iwreq_data wrqu; struct iw_thrspy threshold; /* Init */ wrqu.data.length = 1; wrqu.data.flags = 0; /* Copy address */ memcpy(threshold.addr.sa_data, address, ETH_ALEN); threshold.addr.sa_family = ARPHRD_ETHER; /* Copy stats */ threshold.qual = *wstats; /* Copy also thresholds */ threshold.low = spydata->spy_thr_low; threshold.high = spydata->spy_thr_high; /* Send event to user space */ wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); } /* ---------------------------------------------------------------- */ /* * Call for the driver to update the spy data. * For now, the spy data is a simple array. As the size of the array is * small, this is good enough. If we wanted to support larger number of * spy addresses, we should use something more efficient... */ void wireless_spy_update(struct net_device * dev, unsigned char * address, struct iw_quality * wstats) { struct iw_spy_data * spydata = get_spydata(dev); int i; int match = -1; /* Make sure driver is not buggy or using the old API */ if (!spydata) return; /* Update all records that match */ for (i = 0; i < spydata->spy_number; i++) if (ether_addr_equal(address, spydata->spy_address[i])) { memcpy(&(spydata->spy_stat[i]), wstats, sizeof(struct iw_quality)); match = i; } /* Generate an event if we cross the spy threshold. * To avoid event storms, we have a simple hysteresis : we generate * event only when we go under the low threshold or above the * high threshold. */ if (match >= 0) { if (spydata->spy_thr_under[match]) { if (wstats->level > spydata->spy_thr_high.level) { spydata->spy_thr_under[match] = 0; iw_send_thrspy_event(dev, spydata, address, wstats); } } else { if (wstats->level < spydata->spy_thr_low.level) { spydata->spy_thr_under[match] = 1; iw_send_thrspy_event(dev, spydata, address, wstats); } } } } EXPORT_SYMBOL(wireless_spy_update);
linux-master
net/wireless/wext-spy.c
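A minimal sketch (not part of the dataset row above; the driver function name and quality values are hypothetical) of how a wireless-extensions driver feeds the spy machinery in net/wireless/wext-spy.c: the driver calls wireless_spy_update() from its receive path with the transmitter address and the link quality it measured.

#include <linux/netdevice.h>
#include <net/iw_handler.h>

static void my_drv_rx_frame(struct net_device *dev, u8 *src_addr, u8 level)
{
	struct iw_quality wstats = {
		.level   = level,	/* driver's encoded signal level */
		.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_QUAL_INVALID |
			   IW_QUAL_NOISE_INVALID,
	};

	/* Records the stats if src_addr is on the user's spy list and may
	 * emit a SIOCGIWTHRSPY event when a threshold is crossed. */
	wireless_spy_update(dev, src_addr, &wstats);
}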
// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 wext compat for managed mode. * * Copyright 2009 Johannes Berg <[email protected]> * Copyright (C) 2009, 2020-2023 Intel Corporation */ #include <linux/export.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "wext-compat.h" #include "nl80211.h" int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct cfg80211_cached_keys *ck = NULL; const u8 *prev_bssid = NULL; int err, i; ASSERT_RTNL(); ASSERT_WDEV_LOCK(wdev); if (!netif_running(wdev->netdev)) return 0; wdev->wext.connect.ie = wdev->wext.ie; wdev->wext.connect.ie_len = wdev->wext.ie_len; /* Use default background scan period */ wdev->wext.connect.bg_scan_period = -1; if (wdev->wext.keys) { wdev->wext.keys->def = wdev->wext.default_key; if (wdev->wext.default_key != -1) wdev->wext.connect.privacy = true; } if (!wdev->wext.connect.ssid_len) return 0; if (wdev->wext.keys && wdev->wext.keys->def != -1) { ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL); if (!ck) return -ENOMEM; for (i = 0; i < 4; i++) ck->params[i].key = ck->data[i]; } if (wdev->wext.prev_bssid_valid) prev_bssid = wdev->wext.prev_bssid; err = cfg80211_connect(rdev, wdev->netdev, &wdev->wext.connect, ck, prev_bssid); if (err) kfree_sensitive(ck); return err; } int cfg80211_mgd_wext_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *wextfreq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_channel *chan = NULL; int err, freq; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; freq = cfg80211_wext_freq(wextfreq); if (freq < 0) return freq; if (freq) { chan = ieee80211_get_channel(wdev->wiphy, freq); if (!chan) return -EINVAL; if (chan->flags & IEEE80211_CHAN_DISABLED) return -EINVAL; } wdev_lock(wdev); if (wdev->conn) { bool event = true; if (wdev->wext.connect.channel == chan) { err = 0; goto out; } /* if SSID set, we'll try right again, avoid event */ if (wdev->wext.connect.ssid_len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.connect.channel = chan; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_channel *chan = NULL; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (wdev->valid_links) return -EOPNOTSUPP; wdev_lock(wdev); if (wdev->links[0].client.current_bss) chan = wdev->links[0].client.current_bss->pub.channel; else if (wdev->wext.connect.channel) chan = wdev->wext.connect.channel; wdev_unlock(wdev); if (chan) { freq->m = chan->center_freq; freq->e = 6; return 0; } /* no channel if not joining */ return -EINVAL; } int cfg80211_mgd_wext_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); size_t len = data->length; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (!data->flags) len = 0; /* iwconfig uses nul termination in SSID.. 
*/ if (len > 0 && ssid[len - 1] == '\0') len--; wdev_lock(wdev); err = 0; if (wdev->conn) { bool event = true; if (wdev->wext.connect.ssid && len && len == wdev->wext.connect.ssid_len && memcmp(wdev->wext.connect.ssid, ssid, len) == 0) goto out; /* if SSID set now, we'll try to connect, avoid event */ if (len) event = false; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, event); if (err) goto out; } wdev->wext.prev_bssid_valid = false; wdev->wext.connect.ssid = wdev->wext.ssid; memcpy(wdev->wext.ssid, ssid, len); wdev->wext.connect.ssid_len = len; wdev->wext.connect.crypto.control_port = false; wdev->wext.connect.crypto.control_port_ethertype = cpu_to_be16(ETH_P_PAE); err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; int ret = 0; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (wdev->valid_links) return -EINVAL; data->flags = 0; wdev_lock(wdev); if (wdev->links[0].client.current_bss) { const struct element *ssid_elem; rcu_read_lock(); ssid_elem = ieee80211_bss_get_elem( &wdev->links[0].client.current_bss->pub, WLAN_EID_SSID); if (ssid_elem) { data->flags = 1; data->length = ssid_elem->datalen; if (data->length > IW_ESSID_MAX_SIZE) ret = -EINVAL; else memcpy(ssid, ssid_elem->data, data->length); } rcu_read_unlock(); } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) { data->flags = 1; data->length = wdev->wext.connect.ssid_len; memcpy(ssid, wdev->wext.connect.ssid, data->length); } wdev_unlock(wdev); return ret; } int cfg80211_mgd_wext_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *bssid = ap_addr->sa_data; int err; /* call only for station! */ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; if (ap_addr->sa_family != ARPHRD_ETHER) return -EINVAL; /* automatic mode */ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid)) bssid = NULL; wdev_lock(wdev); if (wdev->conn) { err = 0; /* both automatic */ if (!bssid && !wdev->wext.connect.bssid) goto out; /* fixed already - and no change */ if (wdev->wext.connect.bssid && bssid && ether_addr_equal(bssid, wdev->wext.connect.bssid)) goto out; err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } if (bssid) { memcpy(wdev->wext.bssid, bssid, ETH_ALEN); wdev->wext.connect.bssid = wdev->wext.bssid; } else wdev->wext.connect.bssid = NULL; err = cfg80211_mgd_wext_connect(rdev, wdev); out: wdev_unlock(wdev); return err; } int cfg80211_mgd_wext_giwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; /* call only for station! 
*/ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) return -EINVAL; ap_addr->sa_family = ARPHRD_ETHER; wdev_lock(wdev); if (wdev->valid_links) { wdev_unlock(wdev); return -EOPNOTSUPP; } if (wdev->links[0].client.current_bss) memcpy(ap_addr->sa_data, wdev->links[0].client.current_bss->pub.bssid, ETH_ALEN); else eth_zero_addr(ap_addr->sa_data); wdev_unlock(wdev); return 0; } int cfg80211_wext_siwgenie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *ie = extra; int ie_len = data->length, err; if (wdev->iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!ie_len) ie = NULL; wiphy_lock(wdev->wiphy); wdev_lock(wdev); /* no change */ err = 0; if (wdev->wext.ie_len == ie_len && memcmp(wdev->wext.ie, ie, ie_len) == 0) goto out; if (ie_len) { ie = kmemdup(extra, ie_len, GFP_KERNEL); if (!ie) { err = -ENOMEM; goto out; } } else ie = NULL; kfree(wdev->wext.ie); wdev->wext.ie = ie; wdev->wext.ie_len = ie_len; if (wdev->conn) { err = cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, false); if (err) goto out; } /* userspace better not think we'll reconnect */ err = 0; out: wdev_unlock(wdev); wiphy_unlock(wdev->wiphy); return err; } int cfg80211_wext_siwmlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct iw_mlme *mlme = (struct iw_mlme *)extra; struct cfg80211_registered_device *rdev; int err; if (!wdev) return -EOPNOTSUPP; rdev = wiphy_to_rdev(wdev->wiphy); if (wdev->iftype != NL80211_IFTYPE_STATION) return -EINVAL; if (mlme->addr.sa_family != ARPHRD_ETHER) return -EINVAL; wiphy_lock(&rdev->wiphy); wdev_lock(wdev); switch (mlme->cmd) { case IW_MLME_DEAUTH: case IW_MLME_DISASSOC: err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true); break; default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); wiphy_unlock(&rdev->wiphy); return err; }
linux-master
net/wireless/wext-sme.c
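The handlers in net/wireless/wext-sme.c service the legacy wireless-extensions ioctls for managed mode. A hedged userspace sketch (interface name and SSID are placeholders) of the SIOCSIWESSID request that is eventually serviced by cfg80211_mgd_wext_siwessid() through the wext compat layer:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

int set_essid(const char *ifname, const char *ssid)
{
	struct iwreq wrq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ - 1);
	wrq.u.essid.pointer = (void *)ssid;
	wrq.u.essid.length  = strlen(ssid);	/* no trailing NUL, see handler above */
	wrq.u.essid.flags   = 1;		/* 1 = set this SSID, 0 = any */

	if (ioctl(fd, SIOCSIWESSID, &wrq) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}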
// SPDX-License-Identifier: GPL-2.0 /* * SME code for cfg80211 * both driver SME event handling and the SME implementation * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg <[email protected]> * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved. * Copyright 2017 Intel Deutschland GmbH */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <linux/export.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include "nl80211.h" #include "reg.h" #include "rdev-ops.h" /* * Software SME in cfg80211, using auth/assoc/deauth calls to the * driver. This is for implementing nl80211's connect/disconnect * and wireless extensions (if configured.) */ struct cfg80211_conn { struct cfg80211_connect_params params; /* these are sub-states of the _CONNECTING sme_state */ enum { CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_AUTH_FAILED_TIMEOUT, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, CFG80211_CONN_ASSOC_FAILED, CFG80211_CONN_ASSOC_FAILED_TIMEOUT, CFG80211_CONN_DEAUTH, CFG80211_CONN_ABANDON, CFG80211_CONN_CONNECTED, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; const u8 *ie; size_t ie_len; bool auto_auth, prev_bssid_valid; }; static void cfg80211_sme_free(struct wireless_dev *wdev) { if (!wdev->conn) return; kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; } static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_scan_request *request; int n_channels, err; ASSERT_WDEV_LOCK(wdev); if (rdev->scan_req || rdev->scan_msg) return -EBUSY; if (wdev->conn->params.channel) n_channels = 1; else n_channels = ieee80211_get_num_supported_channels(wdev->wiphy); request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + sizeof(request->channels[0]) * n_channels, GFP_KERNEL); if (!request) return -ENOMEM; if (wdev->conn->params.channel) { enum nl80211_band band = wdev->conn->params.channel->band; struct ieee80211_supported_band *sband = wdev->wiphy->bands[band]; if (!sband) { kfree(request); return -EINVAL; } request->channels[0] = wdev->conn->params.channel; request->rates[band] = (1 << sband->n_bitrates) - 1; } else { int i = 0, j; enum nl80211_band band; struct ieee80211_supported_band *bands; struct ieee80211_channel *channel; for (band = 0; band < NUM_NL80211_BANDS; band++) { bands = wdev->wiphy->bands[band]; if (!bands) continue; for (j = 0; j < bands->n_channels; j++) { channel = &bands->channels[j]; if (channel->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i++] = channel; } request->rates[band] = (1 << bands->n_bitrates) - 1; } n_channels = i; } request->n_channels = n_channels; request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = 1; memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len); request->ssids[0].ssid_len = wdev->conn->params.ssid_len; eth_broadcast_addr(request->bssid); request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; rdev->scan_req = request; err = rdev_scan(rdev, request); if (!err) { wdev->conn->state = CFG80211_CONN_SCANNING; nl80211_send_scan_start(rdev, wdev); dev_hold(wdev->netdev); } else { rdev->scan_req = NULL; kfree(request); } return err; } static int cfg80211_conn_do_work(struct wireless_dev *wdev, enum 
nl80211_timeout_reason *treason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_connect_params *params; struct cfg80211_auth_request auth_req = {}; struct cfg80211_assoc_request req = {}; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return 0; params = &wdev->conn->params; switch (wdev->conn->state) { case CFG80211_CONN_SCANNING: /* didn't find it during scan ... */ return -ENOENT; case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: if (WARN_ON(!rdev->ops->auth)) return -EOPNOTSUPP; wdev->conn->state = CFG80211_CONN_AUTHENTICATING; auth_req.key = params->key; auth_req.key_len = params->key_len; auth_req.key_idx = params->key_idx; auth_req.auth_type = params->auth_type; auth_req.bss = cfg80211_get_bss(&rdev->wiphy, params->channel, params->bssid, params->ssid, params->ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); auth_req.link_id = -1; err = cfg80211_mlme_auth(rdev, wdev->netdev, &auth_req); cfg80211_put_bss(&rdev->wiphy, auth_req.bss); return err; case CFG80211_CONN_AUTH_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_AUTH; return -ENOTCONN; case CFG80211_CONN_ASSOCIATE_NEXT: if (WARN_ON(!rdev->ops->assoc)) return -EOPNOTSUPP; wdev->conn->state = CFG80211_CONN_ASSOCIATING; if (wdev->conn->prev_bssid_valid) req.prev_bssid = wdev->conn->prev_bssid; req.ie = params->ie; req.ie_len = params->ie_len; req.use_mfp = params->mfp != NL80211_MFP_NO; req.crypto = params->crypto; req.flags = params->flags; req.ht_capa = params->ht_capa; req.ht_capa_mask = params->ht_capa_mask; req.vht_capa = params->vht_capa; req.vht_capa_mask = params->vht_capa_mask; req.link_id = -1; req.bss = cfg80211_get_bss(&rdev->wiphy, params->channel, params->bssid, params->ssid, params->ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!req.bss) { err = -ENOENT; } else { err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req); cfg80211_put_bss(&rdev->wiphy, req.bss); } if (err) cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return err; case CFG80211_CONN_ASSOC_FAILED_TIMEOUT: *treason = NL80211_TIMEOUT_ASSOC; fallthrough; case CFG80211_CONN_ASSOC_FAILED: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return -ENOTCONN; case CFG80211_CONN_DEAUTH: cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); fallthrough; case CFG80211_CONN_ABANDON: /* free directly, disconnected event already sent */ cfg80211_sme_free(wdev); return 0; default: return 0; } } void cfg80211_conn_work(struct work_struct *work) { struct cfg80211_registered_device *rdev = container_of(work, struct cfg80211_registered_device, conn_work); struct wireless_dev *wdev; u8 bssid_buf[ETH_ALEN], *bssid = NULL; enum nl80211_timeout_reason treason; wiphy_lock(&rdev->wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; wdev_lock(wdev); if (!netif_running(wdev->netdev)) { wdev_unlock(wdev); continue; } if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) { wdev_unlock(wdev); continue; } if (wdev->conn->params.bssid) { memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } treason = NL80211_TIMEOUT_UNSPECIFIED; if (cfg80211_conn_do_work(wdev, &treason)) { struct cfg80211_connect_resp_params cr; memset(&cr, 0, sizeof(cr)); cr.status = -1; cr.links[0].bssid = bssid; cr.timeout_reason = treason; __cfg80211_connect_result(wdev->netdev, &cr, 
false); } wdev_unlock(wdev); } wiphy_unlock(&rdev->wiphy); } static void cfg80211_step_auth_next(struct cfg80211_conn *conn, struct cfg80211_bss *bss) { memcpy(conn->bssid, bss->bssid, ETH_ALEN); conn->params.bssid = conn->bssid; conn->params.channel = bss->channel; conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; } /* Returned bss is reference counted and must be cleaned up appropriately. */ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY(wdev->conn->params.privacy)); if (!bss) return NULL; cfg80211_step_auth_next(wdev->conn, bss); schedule_work(&rdev->conn_work); return bss; } static void __cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return; if (wdev->conn->state != CFG80211_CONN_SCANNING && wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) return; bss = cfg80211_get_conn_bss(wdev); if (bss) cfg80211_put_bss(&rdev->wiphy, bss); else schedule_work(&rdev->conn_work); } void cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; wdev_lock(wdev); __cfg80211_sme_scan_done(dev); wdev_unlock(wdev); } void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); ASSERT_WDEV_LOCK(wdev); if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && wdev->conn->auto_auth && wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { /* select automatically between only open, shared, leap */ switch (wdev->conn->params.auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: if (wdev->connect_keys) wdev->conn->params.auth_type = NL80211_AUTHTYPE_SHARED_KEY; else wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; case NL80211_AUTHTYPE_SHARED_KEY: wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; default: /* huh? 
*/ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; break; } wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { struct cfg80211_connect_resp_params cr; memset(&cr, 0, sizeof(cr)); cr.status = status_code; cr.links[0].bssid = mgmt->bssid; cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED; __cfg80211_connect_result(wdev->netdev, &cr, false); } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return false; if (status == WLAN_STATUS_SUCCESS) { wdev->conn->state = CFG80211_CONN_CONNECTED; return false; } if (wdev->conn->prev_bssid_valid) { /* * Some stupid APs don't accept reassoc, so we * need to fall back to trying regular assoc; * return true so no event is sent to userspace. */ wdev->conn->prev_bssid_valid = false; wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); return true; } wdev->conn->state = CFG80211_CONN_ASSOC_FAILED; schedule_work(&rdev->conn_work); return false; } void cfg80211_sme_deauth(struct wireless_dev *wdev) { cfg80211_sme_free(wdev); } void cfg80211_sme_auth_timeout(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } void cfg80211_sme_disassoc(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_DEAUTH; schedule_work(&rdev->conn_work); } void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT; schedule_work(&rdev->conn_work); } void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); if (!wdev->conn) return; wdev->conn->state = CFG80211_CONN_ABANDON; schedule_work(&rdev->conn_work); } static void cfg80211_wdev_release_bsses(struct wireless_dev *wdev) { unsigned int link; for_each_valid_link(wdev, link) { if (!wdev->links[link].client.current_bss) continue; cfg80211_unhold_bss(wdev->links[link].client.current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->links[link].client.current_bss->pub); wdev->links[link].client.current_bss = NULL; } } void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask) { unsigned int link; for_each_valid_link(wdev, link) { if (!wdev->links[link].client.current_bss || !(link_mask & BIT(link))) continue; cfg80211_unhold_bss(wdev->links[link].client.current_bss); cfg80211_put_bss(wdev->wiphy, &wdev->links[link].client.current_bss->pub); wdev->links[link].client.current_bss = NULL; } } static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev, const u8 *ies, size_t ies_len, const u8 **out_ies, size_t *out_ies_len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); u8 *buf; size_t offs; if (!rdev->wiphy.extended_capabilities_len || (ies && cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, ies, ies_len))) { *out_ies = kmemdup(ies, ies_len, GFP_KERNEL); if (!*out_ies) return -ENOMEM; *out_ies_len = ies_len; return 0; } buf = kmalloc(ies_len + 
rdev->wiphy.extended_capabilities_len + 2, GFP_KERNEL); if (!buf) return -ENOMEM; if (ies_len) { static const u8 before_extcapa[] = { /* not listing IEs expected to be created by driver */ WLAN_EID_RSN, WLAN_EID_QOS_CAPA, WLAN_EID_RRM_ENABLED_CAPABILITIES, WLAN_EID_MOBILITY_DOMAIN, WLAN_EID_SUPPORTED_REGULATORY_CLASSES, WLAN_EID_BSS_COEX_2040, }; offs = ieee80211_ie_split(ies, ies_len, before_extcapa, ARRAY_SIZE(before_extcapa), 0); memcpy(buf, ies, offs); /* leave a whole for extended capabilities IE */ memcpy(buf + offs + rdev->wiphy.extended_capabilities_len + 2, ies + offs, ies_len - offs); } else { offs = 0; } /* place extended capabilities IE (with only driver capabilities) */ buf[offs] = WLAN_EID_EXT_CAPABILITY; buf[offs + 1] = rdev->wiphy.extended_capabilities_len; memcpy(buf + offs + 2, rdev->wiphy.extended_capabilities, rdev->wiphy.extended_capabilities_len); *out_ies = buf; *out_ies_len = ies_len + rdev->wiphy.extended_capabilities_len + 2; return 0; } static int cfg80211_sme_connect(struct wireless_dev *wdev, struct cfg80211_connect_params *connect, const u8 *prev_bssid) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_bss *bss; int err; if (!rdev->ops->auth || !rdev->ops->assoc) return -EOPNOTSUPP; cfg80211_wdev_release_bsses(wdev); if (wdev->connected) { cfg80211_sme_free(wdev); wdev->connected = false; } if (wdev->conn) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); if (!wdev->conn) return -ENOMEM; /* * Copy all parameters, and treat explicitly IEs, BSSID, SSID. */ memcpy(&wdev->conn->params, connect, sizeof(*connect)); if (connect->bssid) { wdev->conn->params.bssid = wdev->conn->bssid; memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); } if (cfg80211_sme_get_conn_ies(wdev, connect->ie, connect->ie_len, &wdev->conn->ie, &wdev->conn->params.ie_len)) { kfree(wdev->conn); wdev->conn = NULL; return -ENOMEM; } wdev->conn->params.ie = wdev->conn->ie; if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { wdev->conn->auto_auth = true; /* start with open system ... should mostly work */ wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } else { wdev->conn->auto_auth = false; } wdev->conn->params.ssid = wdev->u.client.ssid; wdev->conn->params.ssid_len = wdev->u.client.ssid_len; /* see if we have the bss already */ bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY(wdev->conn->params.privacy)); if (prev_bssid) { memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); wdev->conn->prev_bssid_valid = true; } /* we're good if we have a matching bss struct */ if (bss) { enum nl80211_timeout_reason treason; cfg80211_step_auth_next(wdev->conn, bss); err = cfg80211_conn_do_work(wdev, &treason); cfg80211_put_bss(wdev->wiphy, bss); } else { /* otherwise we'll need to scan for the AP first */ err = cfg80211_conn_scan(wdev); /* * If we can't scan right now, then we need to scan again * after the current scan finished, since the parameters * changed (unless we find a good AP anyway). 
*/ if (err == -EBUSY) { err = 0; wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; } } if (err) cfg80211_sme_free(wdev); return err; } static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int err; if (!wdev->conn) return 0; if (!rdev->ops->deauth) return -EOPNOTSUPP; if (wdev->conn->state == CFG80211_CONN_SCANNING || wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) { err = 0; goto out; } /* wdev->conn->params.bssid must be set if > SCANNING */ err = cfg80211_mlme_deauth(rdev, wdev->netdev, wdev->conn->params.bssid, NULL, 0, reason, false); out: cfg80211_sme_free(wdev); return err; } /* * code shared for in-device and software SME */ static bool cfg80211_is_all_idle(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; bool is_all_idle = true; /* * All devices must be idle as otherwise if you are actively * scanning some new beacon hints could be learned and would * count as new regulatory hints. * Also if there is any other active beaconing interface we * need not issue a disconnect hint and reset any info such * as chan dfs state, etc. */ list_for_each_entry(rdev, &cfg80211_rdev_list, list) { list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { wdev_lock(wdev); if (wdev->conn || wdev->connected || cfg80211_beaconing_iface_active(wdev)) is_all_idle = false; wdev_unlock(wdev); } } return is_all_idle; } static void disconnect_work(struct work_struct *work) { rtnl_lock(); if (cfg80211_is_all_idle()) regulatory_hint_disconnect(); rtnl_unlock(); } DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); static void cfg80211_connect_result_release_bsses(struct wireless_dev *wdev, struct cfg80211_connect_resp_params *cr) { unsigned int link; for_each_valid_link(cr, link) { if (!cr->links[link].bss) continue; cfg80211_unhold_bss(bss_from_pub(cr->links[link].bss)); cfg80211_put_bss(wdev->wiphy, cr->links[link].bss); } } /* * API calls for drivers implementing connect/disconnect and * SME event handling */ /* This method must consume bss one way or another */ void __cfg80211_connect_result(struct net_device *dev, struct cfg80211_connect_resp_params *cr, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; const struct element *country_elem = NULL; const struct element *ssid; const u8 *country_data; u8 country_datalen; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif unsigned int link; const u8 *connected_addr; bool bss_not_found = false; ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; if (cr->valid_links) { if (WARN_ON(!cr->ap_mld_addr)) goto out; for_each_valid_link(cr, link) { if (WARN_ON(!cr->links[link].addr)) goto out; } if (WARN_ON(wdev->connect_keys)) goto out; } wdev->unprot_beacon_reported = 0; nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr, GFP_KERNEL); connected_addr = cr->valid_links ? 
cr->ap_mld_addr : cr->links[0].bssid; #ifdef CONFIG_CFG80211_WEXT if (wextev && !cr->valid_links) { if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = cr->req_ie_len; wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, cr->req_ie); } if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = cr->resp_ie_len; wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, cr->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (connected_addr && cr->status == WLAN_STATUS_SUCCESS) { memcpy(wrqu.ap_addr.sa_data, connected_addr, ETH_ALEN); memcpy(wdev->wext.prev_bssid, connected_addr, ETH_ALEN); wdev->wext.prev_bssid_valid = true; } wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } #endif if (cr->status == WLAN_STATUS_SUCCESS) { if (!wiphy_to_rdev(wdev->wiphy)->ops->connect) { for_each_valid_link(cr, link) { if (WARN_ON_ONCE(!cr->links[link].bss)) break; } } for_each_valid_link(cr, link) { /* don't do extra lookups for failures */ if (cr->links[link].status != WLAN_STATUS_SUCCESS) continue; if (cr->links[link].bss) continue; cr->links[link].bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->links[link].bssid, wdev->u.client.ssid, wdev->u.client.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (!cr->links[link].bss) { bss_not_found = true; break; } cfg80211_hold_bss(bss_from_pub(cr->links[link].bss)); } } cfg80211_wdev_release_bsses(wdev); if (cr->status != WLAN_STATUS_SUCCESS) { kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; wdev->u.client.ssid_len = 0; wdev->conn_owner_nlportid = 0; cfg80211_connect_result_release_bsses(wdev, cr); cfg80211_sme_free(wdev); return; } if (WARN_ON(bss_not_found)) { cfg80211_connect_result_release_bsses(wdev, cr); return; } memset(wdev->links, 0, sizeof(wdev->links)); for_each_valid_link(cr, link) { if (cr->links[link].status == WLAN_STATUS_SUCCESS) continue; cr->valid_links &= ~BIT(link); /* don't require bss pointer for failed links */ if (!cr->links[link].bss) continue; cfg80211_unhold_bss(bss_from_pub(cr->links[link].bss)); cfg80211_put_bss(wdev->wiphy, cr->links[link].bss); } wdev->valid_links = cr->valid_links; for_each_valid_link(cr, link) wdev->links[link].client.current_bss = bss_from_pub(cr->links[link].bss); wdev->connected = true; ether_addr_copy(wdev->u.client.connected_addr, connected_addr); if (cr->valid_links) { for_each_valid_link(cr, link) memcpy(wdev->links[link].addr, cr->links[link].addr, ETH_ALEN); } cfg80211_upload_connect_keys(wdev); rcu_read_lock(); for_each_valid_link(cr, link) { country_elem = ieee80211_bss_get_elem(cr->links[link].bss, WLAN_EID_COUNTRY); if (country_elem) break; } if (!country_elem) { rcu_read_unlock(); return; } country_datalen = country_elem->datalen; country_data = kmemdup(country_elem->data, country_datalen, GFP_ATOMIC); rcu_read_unlock(); if (!country_data) return; regulatory_hint_country_ie(wdev->wiphy, cr->links[link].bss->channel->band, country_data, country_datalen); kfree(country_data); if (!wdev->u.client.ssid_len) { rcu_read_lock(); for_each_valid_link(cr, link) { ssid = ieee80211_bss_get_elem(cr->links[link].bss, WLAN_EID_SSID); if (!ssid || !ssid->datalen) continue; memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen); wdev->u.client.ssid_len = ssid->datalen; break; } rcu_read_unlock(); } return; out: for_each_valid_link(cr, link) cfg80211_put_bss(wdev->wiphy, cr->links[link].bss); } static void cfg80211_update_link_bss(struct wireless_dev *wdev, struct cfg80211_bss 
**bss) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_internal_bss *ibss; if (!*bss) return; ibss = bss_from_pub(*bss); if (list_empty(&ibss->list)) { struct cfg80211_bss *found = NULL, *tmp = *bss; found = cfg80211_get_bss(wdev->wiphy, NULL, (*bss)->bssid, wdev->u.client.ssid, wdev->u.client.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (found) { /* The same BSS is already updated so use it * instead, as it has latest info. */ *bss = found; } else { /* Update with BSS provided by driver, it will * be freshly added and ref cnted, we can free * the old one. * * signal_valid can be false, as we are not * expecting the BSS to be found. * * keep the old timestamp to avoid confusion */ cfg80211_bss_update(rdev, ibss, false, ibss->ts); } cfg80211_put_bss(wdev->wiphy, tmp); } } /* Consumes bss object(s) one way or another */ void cfg80211_connect_done(struct net_device *dev, struct cfg80211_connect_resp_params *params, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; u8 *next; size_t link_info_size = 0; unsigned int link; for_each_valid_link(params, link) { cfg80211_update_link_bss(wdev, &params->links[link].bss); link_info_size += params->links[link].bssid ? ETH_ALEN : 0; link_info_size += params->links[link].addr ? ETH_ALEN : 0; } ev = kzalloc(sizeof(*ev) + (params->ap_mld_addr ? ETH_ALEN : 0) + params->req_ie_len + params->resp_ie_len + params->fils.kek_len + params->fils.pmk_len + (params->fils.pmkid ? WLAN_PMKID_LEN : 0) + link_info_size, gfp); if (!ev) { for_each_valid_link(params, link) cfg80211_put_bss(wdev->wiphy, params->links[link].bss); return; } ev->type = EVENT_CONNECT_RESULT; next = ((u8 *)ev) + sizeof(*ev); if (params->ap_mld_addr) { ev->cr.ap_mld_addr = next; memcpy((void *)ev->cr.ap_mld_addr, params->ap_mld_addr, ETH_ALEN); next += ETH_ALEN; } if (params->req_ie_len) { ev->cr.req_ie = next; ev->cr.req_ie_len = params->req_ie_len; memcpy((void *)ev->cr.req_ie, params->req_ie, params->req_ie_len); next += params->req_ie_len; } if (params->resp_ie_len) { ev->cr.resp_ie = next; ev->cr.resp_ie_len = params->resp_ie_len; memcpy((void *)ev->cr.resp_ie, params->resp_ie, params->resp_ie_len); next += params->resp_ie_len; } if (params->fils.kek_len) { ev->cr.fils.kek = next; ev->cr.fils.kek_len = params->fils.kek_len; memcpy((void *)ev->cr.fils.kek, params->fils.kek, params->fils.kek_len); next += params->fils.kek_len; } if (params->fils.pmk_len) { ev->cr.fils.pmk = next; ev->cr.fils.pmk_len = params->fils.pmk_len; memcpy((void *)ev->cr.fils.pmk, params->fils.pmk, params->fils.pmk_len); next += params->fils.pmk_len; } if (params->fils.pmkid) { ev->cr.fils.pmkid = next; memcpy((void *)ev->cr.fils.pmkid, params->fils.pmkid, WLAN_PMKID_LEN); next += WLAN_PMKID_LEN; } ev->cr.fils.update_erp_next_seq_num = params->fils.update_erp_next_seq_num; if (params->fils.update_erp_next_seq_num) ev->cr.fils.erp_next_seq_num = params->fils.erp_next_seq_num; ev->cr.valid_links = params->valid_links; for_each_valid_link(params, link) { if (params->links[link].bss) cfg80211_hold_bss( bss_from_pub(params->links[link].bss)); ev->cr.links[link].bss = params->links[link].bss; if (params->links[link].addr) { ev->cr.links[link].addr = next; memcpy((void *)ev->cr.links[link].addr, params->links[link].addr, ETH_ALEN); next += ETH_ALEN; } if (params->links[link].bssid) { ev->cr.links[link].bssid = next; memcpy((void 
*)ev->cr.links[link].bssid, params->links[link].bssid, ETH_ALEN); next += ETH_ALEN; } } ev->cr.status = params->status; ev->cr.timeout_reason = params->timeout_reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_connect_done); /* Consumes bss object one way or another */ void __cfg80211_roamed(struct wireless_dev *wdev, struct cfg80211_roam_info *info) { #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif unsigned int link; const u8 *connected_addr; ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; if (WARN_ON(!wdev->connected)) goto out; if (info->valid_links) { if (WARN_ON(!info->ap_mld_addr)) goto out; for_each_valid_link(info, link) { if (WARN_ON(!info->links[link].addr)) goto out; } } cfg80211_wdev_release_bsses(wdev); for_each_valid_link(info, link) { if (WARN_ON(!info->links[link].bss)) goto out; } memset(wdev->links, 0, sizeof(wdev->links)); wdev->valid_links = info->valid_links; for_each_valid_link(info, link) { cfg80211_hold_bss(bss_from_pub(info->links[link].bss)); wdev->links[link].client.current_bss = bss_from_pub(info->links[link].bss); } connected_addr = info->valid_links ? info->ap_mld_addr : info->links[0].bss->bssid; ether_addr_copy(wdev->u.client.connected_addr, connected_addr); if (info->valid_links) { for_each_valid_link(info, link) memcpy(wdev->links[link].addr, info->links[link].addr, ETH_ALEN); } wdev->unprot_beacon_reported = 0; nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy), wdev->netdev, info, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (!info->valid_links) { if (info->req_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = info->req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, &wrqu, info->req_ie); } if (info->resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = info->resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, &wrqu, info->resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu.ap_addr.sa_data, connected_addr, ETH_ALEN); memcpy(wdev->wext.prev_bssid, connected_addr, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); } #endif return; out: for_each_valid_link(info, link) cfg80211_put_bss(wdev->wiphy, info->links[link].bss); } /* Consumes info->links.bss object(s) one way or another */ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; u8 *next; unsigned int link; size_t link_info_size = 0; bool bss_not_found = false; for_each_valid_link(info, link) { link_info_size += info->links[link].addr ? ETH_ALEN : 0; link_info_size += info->links[link].bssid ? ETH_ALEN : 0; if (info->links[link].bss) continue; info->links[link].bss = cfg80211_get_bss(wdev->wiphy, info->links[link].channel, info->links[link].bssid, wdev->u.client.ssid, wdev->u.client.ssid_len, wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (!info->links[link].bss) { bss_not_found = true; break; } } if (WARN_ON(bss_not_found)) goto out; ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len + info->fils.kek_len + info->fils.pmk_len + (info->fils.pmkid ? WLAN_PMKID_LEN : 0) + (info->ap_mld_addr ? 
ETH_ALEN : 0) + link_info_size, gfp); if (!ev) goto out; ev->type = EVENT_ROAMED; next = ((u8 *)ev) + sizeof(*ev); if (info->req_ie_len) { ev->rm.req_ie = next; ev->rm.req_ie_len = info->req_ie_len; memcpy((void *)ev->rm.req_ie, info->req_ie, info->req_ie_len); next += info->req_ie_len; } if (info->resp_ie_len) { ev->rm.resp_ie = next; ev->rm.resp_ie_len = info->resp_ie_len; memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len); next += info->resp_ie_len; } if (info->fils.kek_len) { ev->rm.fils.kek = next; ev->rm.fils.kek_len = info->fils.kek_len; memcpy((void *)ev->rm.fils.kek, info->fils.kek, info->fils.kek_len); next += info->fils.kek_len; } if (info->fils.pmk_len) { ev->rm.fils.pmk = next; ev->rm.fils.pmk_len = info->fils.pmk_len; memcpy((void *)ev->rm.fils.pmk, info->fils.pmk, info->fils.pmk_len); next += info->fils.pmk_len; } if (info->fils.pmkid) { ev->rm.fils.pmkid = next; memcpy((void *)ev->rm.fils.pmkid, info->fils.pmkid, WLAN_PMKID_LEN); next += WLAN_PMKID_LEN; } ev->rm.fils.update_erp_next_seq_num = info->fils.update_erp_next_seq_num; if (info->fils.update_erp_next_seq_num) ev->rm.fils.erp_next_seq_num = info->fils.erp_next_seq_num; if (info->ap_mld_addr) { ev->rm.ap_mld_addr = next; memcpy((void *)ev->rm.ap_mld_addr, info->ap_mld_addr, ETH_ALEN); next += ETH_ALEN; } ev->rm.valid_links = info->valid_links; for_each_valid_link(info, link) { ev->rm.links[link].bss = info->links[link].bss; if (info->links[link].addr) { ev->rm.links[link].addr = next; memcpy((void *)ev->rm.links[link].addr, info->links[link].addr, ETH_ALEN); next += ETH_ALEN; } if (info->links[link].bssid) { ev->rm.links[link].bssid = next; memcpy((void *)ev->rm.links[link].bssid, info->links[link].bssid, ETH_ALEN); next += ETH_ALEN; } } spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); return; out: for_each_valid_link(info, link) cfg80211_put_bss(wdev->wiphy, info->links[link].bss); } EXPORT_SYMBOL(cfg80211_roamed); void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid, const u8 *td_bitmap, u8 td_bitmap_len) { ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; if (WARN_ON(!wdev->connected) || WARN_ON(!ether_addr_equal(wdev->u.client.connected_addr, bssid))) return; nl80211_send_port_authorized(wiphy_to_rdev(wdev->wiphy), wdev->netdev, bssid, td_bitmap, td_bitmap_len); } void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid, const u8 *td_bitmap, u8 td_bitmap_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; if (WARN_ON(!bssid)) return; ev = kzalloc(sizeof(*ev) + td_bitmap_len, gfp); if (!ev) return; ev->type = EVENT_PORT_AUTHORIZED; memcpy(ev->pa.bssid, bssid, ETH_ALEN); ev->pa.td_bitmap = ((u8 *)ev) + sizeof(*ev); ev->pa.td_bitmap_len = td_bitmap_len; memcpy((void *)ev->pa.td_bitmap, td_bitmap, td_bitmap_len); /* * Use the wdev event list so that if there are pending * connected/roamed events, they will be reported first. 
*/ spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_port_authorized); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); int i; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; cfg80211_wdev_release_bsses(wdev); wdev->connected = false; wdev->u.client.ssid_len = 0; wdev->conn_owner_nlportid = 0; kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); /* stop critical protocol if supported */ if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } /* * Delete all the keys ... pairwise keys can't really * exist any more anyway, but default keys might. */ if (rdev->ops->del_key) { int max_key_idx = 5; if (wiphy_ext_feature_isset( wdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION) || wiphy_ext_feature_isset( wdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) max_key_idx = 7; for (i = 0; i <= max_key_idx; i++) rdev_del_key(rdev, dev, -1, i, false, NULL); } rdev_set_qos_map(rdev, dev, NULL); #ifdef CONFIG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); wdev->wext.connect.ssid_len = 0; #endif schedule_work(&cfg80211_disconnect_work); } void cfg80211_disconnected(struct net_device *dev, u16 reason, const u8 *ie, size_t ie_len, bool locally_generated, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; ev->type = EVENT_DISCONNECTED; ev->dc.ie = ((u8 *)ev) + sizeof(*ev); ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; ev->dc.locally_generated = locally_generated; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_disconnected); /* * API calls for nl80211/wext compatibility code */ int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); /* * If we have an ssid_len, we're trying to connect or are * already connected, so reject a new SSID unless it's the * same (which is the case for re-association.) */ if (wdev->u.client.ssid_len && (wdev->u.client.ssid_len != connect->ssid_len || memcmp(wdev->u.client.ssid, connect->ssid, wdev->u.client.ssid_len))) return -EALREADY; /* * If connected, reject (re-)association unless prev_bssid * matches the current BSSID. 
*/ if (wdev->connected) { if (!prev_bssid) return -EALREADY; if (!ether_addr_equal(prev_bssid, wdev->u.client.connected_addr)) return -ENOTCONN; } /* * Reject if we're in the process of connecting with WEP, * this case isn't very interesting and trying to handle * it would make the code much more complex. */ if (wdev->connect_keys) return -EINPROGRESS; cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); cfg80211_oper_and_vht_capa(&connect->vht_capa_mask, rdev->wiphy.vht_capa_mod_mask); if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; idx = connkeys->def; cipher = connkeys->params[idx].cipher; /* If given a WEP key we may need it for shared key auth */ if (cipher == WLAN_CIPHER_SUITE_WEP40 || cipher == WLAN_CIPHER_SUITE_WEP104) { connect->key_idx = idx; connect->key = connkeys->params[idx].key; connect->key_len = connkeys->params[idx].key_len; /* * If ciphers are not set (e.g. when going through * iwconfig), we have to set them appropriately here. */ if (connect->crypto.cipher_group == 0) connect->crypto.cipher_group = cipher; if (connect->crypto.n_ciphers_pairwise == 0) { connect->crypto.n_ciphers_pairwise = 1; connect->crypto.ciphers_pairwise[0] = cipher; } } } else { if (WARN_ON(connkeys)) return -EINVAL; /* connect can point to wdev->wext.connect which * can hold key data from a previous connection */ connect->key = NULL; connect->key_len = 0; connect->key_idx = 0; } wdev->connect_keys = connkeys; memcpy(wdev->u.client.ssid, connect->ssid, connect->ssid_len); wdev->u.client.ssid_len = connect->ssid_len; wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS : IEEE80211_BSS_TYPE_ESS; if (!rdev->ops->connect) err = cfg80211_sme_connect(wdev, connect, prev_bssid); else err = rdev_connect(rdev, dev, connect); if (err) { wdev->connect_keys = NULL; /* * This could be reassoc getting refused, don't clear * ssid_len in that case. */ if (!wdev->connected) wdev->u.client.ssid_len = 0; return err; } return 0; } int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err = 0; ASSERT_WDEV_LOCK(wdev); kfree_sensitive(wdev->connect_keys); wdev->connect_keys = NULL; wdev->conn_owner_nlportid = 0; if (wdev->conn) err = cfg80211_sme_disconnect(wdev, reason); else if (!rdev->ops->disconnect) cfg80211_mlme_down(rdev, dev); else if (wdev->u.client.ssid_len) err = rdev_disconnect(rdev, dev, reason); /* * Clear ssid_len unless we actually were fully connected, * in which case cfg80211_disconnected() will take care of * this later. */ if (!wdev->connected) wdev->u.client.ssid_len = 0; return err; } /* * Used to clean up after the connection / connection attempt owner socket * disconnects */ void cfg80211_autodisconnect_wk(struct work_struct *work) { struct wireless_dev *wdev = container_of(work, struct wireless_dev, disconnect_wk); struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); wiphy_lock(wdev->wiphy); wdev_lock(wdev); if (wdev->conn_owner_nlportid) { switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: __cfg80211_leave_ibss(rdev, wdev->netdev, false); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: __cfg80211_stop_ap(rdev, wdev->netdev, -1, false); break; case NL80211_IFTYPE_MESH_POINT: __cfg80211_leave_mesh(rdev, wdev->netdev); break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* * Use disconnect_bssid if still connecting and * ops->disconnect not implemented. Otherwise we can * use cfg80211_disconnect. 
*/ if (rdev->ops->disconnect || wdev->connected) cfg80211_disconnect(rdev, wdev->netdev, WLAN_REASON_DEAUTH_LEAVING, true); else cfg80211_mlme_deauth(rdev, wdev->netdev, wdev->disconnect_bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); break; default: break; } } wdev_unlock(wdev); wiphy_unlock(wdev->wiphy); }
linux-master
net/wireless/sme.c
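net/wireless/sme.c exposes cfg80211_connect_done(), cfg80211_roamed(), cfg80211_disconnected() and cfg80211_port_authorized() as the driver-facing entry points. A hedged sketch, with a hypothetical my_drv_connect_complete() helper, of a full-MAC driver reporting a connect result through cfg80211_connect_done(); the cfg80211_connect_resp_params fields used here are the ones handled above:

#include <linux/string.h>
#include <net/cfg80211.h>

static void my_drv_connect_complete(struct net_device *dev,
				    const u8 *bssid, u16 status,
				    const u8 *req_ie, size_t req_ie_len,
				    const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_connect_resp_params params;

	memset(&params, 0, sizeof(params));
	params.status = status;			/* WLAN_STATUS_SUCCESS on success */
	params.links[0].bssid = bssid;
	params.req_ie = req_ie;
	params.req_ie_len = req_ie_len;
	params.resp_ie = resp_ie;
	params.resp_ie_len = resp_ie_len;

	/* Queues an EVENT_CONNECT_RESULT that cfg80211 processes from its
	 * event work, eventually calling __cfg80211_connect_result(). */
	cfg80211_connect_done(dev, &params, GFP_KERNEL);
}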
// SPDX-License-Identifier: GPL-2.0-only /* * lib80211 -- common bits for IEEE802.11 drivers * * Copyright(c) 2008 John W. Linville <[email protected]> * * Portions copied from old ieee80211 component, w/ original copyright * notices below: * * Host AP crypto routines * * Copyright (c) 2002-2003, Jouni Malinen <[email protected]> * Portions Copyright (C) 2004, Intel Corporation <[email protected]> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/ctype.h> #include <linux/ieee80211.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/string.h> #include <net/lib80211.h> #define DRV_DESCRIPTION "common routines for IEEE802.11 drivers" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("John W. Linville <[email protected]>"); MODULE_LICENSE("GPL"); struct lib80211_crypto_alg { struct list_head list; struct lib80211_crypto_ops *ops; }; static LIST_HEAD(lib80211_crypto_algs); static DEFINE_SPINLOCK(lib80211_crypto_lock); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force); static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info); static void lib80211_crypt_deinit_handler(struct timer_list *t); int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, spinlock_t *lock) { memset(info, 0, sizeof(*info)); info->name = name; info->lock = lock; INIT_LIST_HEAD(&info->crypt_deinit_list); timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler, 0); return 0; } EXPORT_SYMBOL(lib80211_crypt_info_init); void lib80211_crypt_info_free(struct lib80211_crypt_info *info) { int i; lib80211_crypt_quiescing(info); del_timer_sync(&info->crypt_deinit_timer); lib80211_crypt_deinit_entries(info, 1); for (i = 0; i < NUM_WEP_KEYS; i++) { struct lib80211_crypt_data *crypt = info->crypt[i]; if (crypt) { if (crypt->ops) { crypt->ops->deinit(crypt->priv); module_put(crypt->ops->owner); } kfree(crypt); info->crypt[i] = NULL; } } } EXPORT_SYMBOL(lib80211_crypt_info_free); static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force) { struct lib80211_crypt_data *entry, *next; unsigned long flags; spin_lock_irqsave(info->lock, flags); list_for_each_entry_safe(entry, next, &info->crypt_deinit_list, list) { if (atomic_read(&entry->refcnt) != 0 && !force) continue; list_del(&entry->list); if (entry->ops) { entry->ops->deinit(entry->priv); module_put(entry->ops->owner); } kfree(entry); } spin_unlock_irqrestore(info->lock, flags); } /* After this, crypt_deinit_list won't accept new members */ static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info) { unsigned long flags; spin_lock_irqsave(info->lock, flags); info->crypt_quiesced = 1; spin_unlock_irqrestore(info->lock, flags); } static void lib80211_crypt_deinit_handler(struct timer_list *t) { struct lib80211_crypt_info *info = from_timer(info, t, crypt_deinit_timer); unsigned long flags; lib80211_crypt_deinit_entries(info, 0); spin_lock_irqsave(info->lock, flags); if (!list_empty(&info->crypt_deinit_list) && !info->crypt_quiesced) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " "deletion list\n", info->name); info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } spin_unlock_irqrestore(info->lock, flags); } void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, struct lib80211_crypt_data **crypt) { struct lib80211_crypt_data *tmp; unsigned long flags; if (*crypt == NULL) return; tmp = *crypt; *crypt = NULL; /* must not run 
ops->deinit() while there may be pending encrypt or * decrypt operations. Use a list of delayed deinits to avoid needing * locking. */ spin_lock_irqsave(info->lock, flags); if (!info->crypt_quiesced) { list_add(&tmp->list, &info->crypt_deinit_list); if (!timer_pending(&info->crypt_deinit_timer)) { info->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&info->crypt_deinit_timer); } } spin_unlock_irqrestore(info->lock, flags); } EXPORT_SYMBOL(lib80211_crypt_delayed_deinit); int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops) { unsigned long flags; struct lib80211_crypto_alg *alg; alg = kzalloc(sizeof(*alg), GFP_KERNEL); if (alg == NULL) return -ENOMEM; alg->ops = ops; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_add(&alg->list, &lib80211_crypto_algs); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); printk(KERN_DEBUG "lib80211_crypt: registered algorithm '%s'\n", ops->name); return 0; } EXPORT_SYMBOL(lib80211_register_crypto_ops); int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (alg->ops == ops) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return -EINVAL; found: printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n", ops->name); list_del(&alg->list); spin_unlock_irqrestore(&lib80211_crypto_lock, flags); kfree(alg); return 0; } EXPORT_SYMBOL(lib80211_unregister_crypto_ops); struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name) { struct lib80211_crypto_alg *alg; unsigned long flags; spin_lock_irqsave(&lib80211_crypto_lock, flags); list_for_each_entry(alg, &lib80211_crypto_algs, list) { if (strcmp(alg->ops->name, name) == 0) goto found; } spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return NULL; found: spin_unlock_irqrestore(&lib80211_crypto_lock, flags); return alg->ops; } EXPORT_SYMBOL(lib80211_get_crypto_ops); static void *lib80211_crypt_null_init(int keyidx) { return (void *)1; } static void lib80211_crypt_null_deinit(void *priv) { } static struct lib80211_crypto_ops lib80211_crypt_null = { .name = "NULL", .init = lib80211_crypt_null_init, .deinit = lib80211_crypt_null_deinit, .owner = THIS_MODULE, }; static int __init lib80211_init(void) { pr_info(DRV_DESCRIPTION "\n"); return lib80211_register_crypto_ops(&lib80211_crypt_null); } static void __exit lib80211_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_null); BUG_ON(!list_empty(&lib80211_crypto_algs)); } module_init(lib80211_init); module_exit(lib80211_exit);
linux-master
net/wireless/lib80211.c
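As a quick illustration of the registration API in lib80211.c above, here is a minimal sketch of a cipher module written in the style of lib80211_crypt_null; the "example" names are invented for illustration, and only the callbacks actually exercised in this file are filled in (a real cipher would also provide encrypt/decrypt operations).

/*
 * Hypothetical example module: registers a do-nothing cipher the same way
 * lib80211_crypt_null does above.  Names are illustrative only.
 */
#include <linux/module.h>
#include <net/lib80211.h>

static void *example_crypt_init(int keyidx)
{
	/* no per-key state needed for this sketch */
	return (void *)1;
}

static void example_crypt_deinit(void *priv)
{
}

static struct lib80211_crypto_ops example_crypt_ops = {
	.name	= "EXAMPLE",
	.init	= example_crypt_init,
	.deinit	= example_crypt_deinit,
	.owner	= THIS_MODULE,
};

static int __init example_crypt_init_module(void)
{
	/* adds the ops to lib80211_crypto_algs under lib80211_crypto_lock */
	return lib80211_register_crypto_ops(&example_crypt_ops);
}

static void __exit example_crypt_exit_module(void)
{
	lib80211_unregister_crypto_ops(&example_crypt_ops);
}

module_init(example_crypt_init_module);
module_exit(example_crypt_exit_module);
MODULE_LICENSE("GPL");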
/* * Copyright (C) 2017 Rafał Miłecki <[email protected]> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/of.h> #include <net/cfg80211.h> #include "core.h" static bool wiphy_freq_limits_valid_chan(struct wiphy *wiphy, struct ieee80211_freq_range *freq_limits, unsigned int n_freq_limits, struct ieee80211_channel *chan) { u32 bw = MHZ_TO_KHZ(20); int i; for (i = 0; i < n_freq_limits; i++) { struct ieee80211_freq_range *limit = &freq_limits[i]; if (cfg80211_does_bw_fit_range(limit, MHZ_TO_KHZ(chan->center_freq), bw)) return true; } return false; } static void wiphy_freq_limits_apply(struct wiphy *wiphy, struct ieee80211_freq_range *freq_limits, unsigned int n_freq_limits) { enum nl80211_band band; int i; if (WARN_ON(!n_freq_limits)) return; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *chan = &sband->channels[i]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; if (!wiphy_freq_limits_valid_chan(wiphy, freq_limits, n_freq_limits, chan)) { pr_debug("Disabling freq %d MHz as it's out of OF limits\n", chan->center_freq); chan->flags |= IEEE80211_CHAN_DISABLED; } } } } void wiphy_read_of_freq_limits(struct wiphy *wiphy) { struct device *dev = wiphy_dev(wiphy); struct device_node *np; struct property *prop; struct ieee80211_freq_range *freq_limits; unsigned int n_freq_limits; const __be32 *p; int len, i; int err = 0; if (!dev) return; np = dev_of_node(dev); if (!np) return; prop = of_find_property(np, "ieee80211-freq-limit", &len); if (!prop) return; if (!len || len % sizeof(u32) || len / sizeof(u32) % 2) { dev_err(dev, "ieee80211-freq-limit wrong format"); return; } n_freq_limits = len / sizeof(u32) / 2; freq_limits = kcalloc(n_freq_limits, sizeof(*freq_limits), GFP_KERNEL); if (!freq_limits) { err = -ENOMEM; goto out_kfree; } p = NULL; for (i = 0; i < n_freq_limits; i++) { struct ieee80211_freq_range *limit = &freq_limits[i]; p = of_prop_next_u32(prop, p, &limit->start_freq_khz); if (!p) { err = -EINVAL; goto out_kfree; } p = of_prop_next_u32(prop, p, &limit->end_freq_khz); if (!p) { err = -EINVAL; goto out_kfree; } if (!limit->start_freq_khz || !limit->end_freq_khz || limit->start_freq_khz >= limit->end_freq_khz) { err = -EINVAL; goto out_kfree; } } wiphy_freq_limits_apply(wiphy, freq_limits, n_freq_limits); out_kfree: kfree(freq_limits); if (err) dev_err(dev, "Failed to get limits: %d\n", err); } EXPORT_SYMBOL(wiphy_read_of_freq_limits);
linux-master
net/wireless/of.c
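A hedged sketch of how wiphy_read_of_freq_limits() in of.c above is typically consumed: the driver function and device-tree node names are made up, but the property layout (u32 start/end pairs in kHz) follows the parsing code, and the helper is called before wiphy_register() so channels outside every range end up with IEEE80211_CHAN_DISABLED.

/*
 * Illustrative only -- node and function names are invented.  The property
 * is parsed above as pairs of u32 values in kHz: <start1 end1 start2 end2 ...>,
 * e.g. in the wiphy's OF node:
 *
 *	wifi@0 {
 *		compatible = "example,wifi";
 *		ieee80211-freq-limit = <2402000 2482000>,
 *				       <5170000 5250000>;
 *	};
 */
#include <net/cfg80211.h>

static int example_wiphy_setup(struct wiphy *wiphy)
{
	/*
	 * Reads "ieee80211-freq-limit" from the wiphy's OF node (if any) and
	 * disables every enabled channel that does not fit one of the ranges.
	 */
	wiphy_read_of_freq_limits(wiphy);

	return wiphy_register(wiphy);
}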
// SPDX-License-Identifier: GPL-2.0 /* * cfg80211 MLME SAP interface * * Copyright (c) 2009, Jouni Malinen <[email protected]> * Copyright (c) 2015 Intel Deutschland GmbH * Copyright (C) 2019-2020, 2022 Intel Corporation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/nl80211.h> #include <linux/slab.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include "core.h" #include "nl80211.h" #include "rdev-ops.h" void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_rx_assoc_resp *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)data->buf; struct cfg80211_connect_resp_params cr = { .timeout_reason = NL80211_TIMEOUT_UNSPECIFIED, .req_ie = data->req_ies, .req_ie_len = data->req_ies_len, .resp_ie = mgmt->u.assoc_resp.variable, .resp_ie_len = data->len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable), .status = le16_to_cpu(mgmt->u.assoc_resp.status_code), .ap_mld_addr = data->ap_mld_addr, }; unsigned int link_id; for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) { cr.links[link_id].status = data->links[link_id].status; WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS && (!cr.ap_mld_addr || !cr.links[link_id].bss)); cr.links[link_id].bss = data->links[link_id].bss; if (!cr.links[link_id].bss) continue; cr.links[link_id].bssid = data->links[link_id].bss->bssid; cr.links[link_id].addr = data->links[link_id].addr; /* need to have local link addresses for MLO connections */ WARN_ON(cr.ap_mld_addr && !cr.links[link_id].addr); BUG_ON(!cr.links[link_id].bss->channel); if (cr.links[link_id].bss->channel->band == NL80211_BAND_S1GHZ) { WARN_ON(link_id); cr.resp_ie = (u8 *)&mgmt->u.s1g_assoc_resp.variable; cr.resp_ie_len = data->len - offsetof(struct ieee80211_mgmt, u.s1g_assoc_resp.variable); } if (cr.ap_mld_addr) cr.valid_links |= BIT(link_id); } trace_cfg80211_send_rx_assoc(dev, data); /* * This is a bit of a hack, we don't notify userspace of * a (re-)association reply if we tried to send a reassoc * and got a reject -- we only try again with an assoc * frame instead of reassoc. 
*/ if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) { for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) { struct cfg80211_bss *bss = data->links[link_id].bss; if (!bss) continue; cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); } return; } nl80211_send_rx_assoc(rdev, dev, data); /* update current_bss etc., consumes the bss reference */ __cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS); } EXPORT_SYMBOL(cfg80211_rx_assoc_resp); static void cfg80211_process_auth(struct wireless_dev *wdev, const u8 *buf, size_t len) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL); cfg80211_sme_rx_auth(wdev, buf, len); } static void cfg80211_process_deauth(struct wireless_dev *wdev, const u8 *buf, size_t len, bool reconnect) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); nl80211_send_deauth(rdev, wdev->netdev, buf, len, reconnect, GFP_KERNEL); if (!wdev->connected || !ether_addr_equal(wdev->u.client.connected_addr, bssid)) return; __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); cfg80211_sme_deauth(wdev); } static void cfg80211_process_disassoc(struct wireless_dev *wdev, const u8 *buf, size_t len, bool reconnect) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; const u8 *bssid = mgmt->bssid; u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); nl80211_send_disassoc(rdev, wdev->netdev, buf, len, reconnect, GFP_KERNEL); if (WARN_ON(!wdev->connected || !ether_addr_equal(wdev->u.client.connected_addr, bssid))) return; __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap); cfg80211_sme_disassoc(wdev); } void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_rx_mlme_mgmt(dev, buf, len); if (WARN_ON(len < 2)) return; if (ieee80211_is_auth(mgmt->frame_control)) cfg80211_process_auth(wdev, buf, len); else if (ieee80211_is_deauth(mgmt->frame_control)) cfg80211_process_deauth(wdev, buf, len, false); else if (ieee80211_is_disassoc(mgmt->frame_control)) cfg80211_process_disassoc(wdev, buf, len, false); } EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_send_auth_timeout(dev, addr); nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL); cfg80211_sme_auth_timeout(wdev); } EXPORT_SYMBOL(cfg80211_auth_timeout); void cfg80211_assoc_failure(struct net_device *dev, struct cfg80211_assoc_failure *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const u8 *addr = data->ap_mld_addr ?: data->bss[0]->bssid; int i; trace_cfg80211_send_assoc_failure(dev, data); if (data->timeout) { nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL); cfg80211_sme_assoc_timeout(wdev); } else { 
cfg80211_sme_abandon_assoc(wdev); } for (i = 0; i < ARRAY_SIZE(data->bss); i++) { struct cfg80211_bss *bss = data->bss[i]; if (!bss) continue; cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); } } EXPORT_SYMBOL(cfg80211_assoc_failure); void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len, bool reconnect) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_tx_mlme_mgmt(dev, buf, len, reconnect); if (WARN_ON(len < 2)) return; if (ieee80211_is_deauth(mgmt->frame_control)) cfg80211_process_deauth(wdev, buf, len, reconnect); else cfg80211_process_disassoc(wdev, buf, len, reconnect); } EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; char *buf = kmalloc(128, gfp); if (buf) { sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" "keyid=%d %scast addr=%pM)", key_id, key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni", addr); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = strlen(buf); wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); kfree(buf); } #endif trace_cfg80211_michael_mic_failure(dev, addr, key_type, key_id, tsc); nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp); } EXPORT_SYMBOL(cfg80211_michael_mic_failure); /* some MLME handling for userspace SME */ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_auth_request *req) { struct wireless_dev *wdev = dev->ieee80211_ptr; ASSERT_WDEV_LOCK(wdev); if (!req->bss) return -ENOENT; if (req->link_id >= 0 && !(wdev->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (req->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { if (!req->key || !req->key_len || req->key_idx < 0 || req->key_idx > 3) return -EINVAL; } if (wdev->connected && ether_addr_equal(req->bss->bssid, wdev->u.client.connected_addr)) return -EALREADY; if (ether_addr_equal(req->bss->bssid, dev->dev_addr) || (req->link_id >= 0 && ether_addr_equal(req->ap_mld_addr, dev->dev_addr))) return -EINVAL; return rdev_auth(rdev, dev, req); } /* Do a logical ht_capa &= ht_capa_mask. */ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, const struct ieee80211_ht_cap *ht_capa_mask) { int i; u8 *p1, *p2; if (!ht_capa_mask) { memset(ht_capa, 0, sizeof(*ht_capa)); return; } p1 = (u8*)(ht_capa); p2 = (u8*)(ht_capa_mask); for (i = 0; i < sizeof(*ht_capa); i++) p1[i] &= p2[i]; } /* Do a logical vht_capa &= vht_capa_mask. 
*/ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa, const struct ieee80211_vht_cap *vht_capa_mask) { int i; u8 *p1, *p2; if (!vht_capa_mask) { memset(vht_capa, 0, sizeof(*vht_capa)); return; } p1 = (u8*)(vht_capa); p2 = (u8*)(vht_capa_mask); for (i = 0; i < sizeof(*vht_capa); i++) p1[i] &= p2[i]; } /* Note: caller must cfg80211_put_bss() regardless of result */ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_assoc_request *req) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err, i, j; ASSERT_WDEV_LOCK(wdev); for (i = 1; i < ARRAY_SIZE(req->links); i++) { if (!req->links[i].bss) continue; for (j = 0; j < i; j++) { if (req->links[i].bss == req->links[j].bss) return -EINVAL; } if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) return -EINVAL; } if (wdev->connected && (!req->prev_bssid || !ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid))) return -EALREADY; if ((req->bss && ether_addr_equal(req->bss->bssid, dev->dev_addr)) || (req->link_id >= 0 && ether_addr_equal(req->ap_mld_addr, dev->dev_addr))) return -EINVAL; cfg80211_oper_and_ht_capa(&req->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); cfg80211_oper_and_vht_capa(&req->vht_capa_mask, rdev->wiphy.vht_capa_mod_mask); err = rdev_assoc(rdev, dev, req); if (!err) { int link_id; if (req->bss) { cfg80211_ref_bss(&rdev->wiphy, req->bss); cfg80211_hold_bss(bss_from_pub(req->bss)); } for (link_id = 0; link_id < ARRAY_SIZE(req->links); link_id++) { if (!req->links[link_id].bss) continue; cfg80211_ref_bss(&rdev->wiphy, req->links[link_id].bss); cfg80211_hold_bss(bss_from_pub(req->links[link_id].bss)); } } return err; } int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_deauth_request req = { .bssid = bssid, .reason_code = reason, .ie = ie, .ie_len = ie_len, .local_state_change = local_state_change, }; ASSERT_WDEV_LOCK(wdev); if (local_state_change && (!wdev->connected || !ether_addr_equal(wdev->u.client.connected_addr, bssid))) return 0; if (ether_addr_equal(wdev->disconnect_bssid, bssid) || (wdev->connected && ether_addr_equal(wdev->u.client.connected_addr, bssid))) wdev->conn_owner_nlportid = 0; return rdev_deauth(rdev, dev, &req); } int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *ap_addr, const u8 *ie, int ie_len, u16 reason, bool local_state_change) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_disassoc_request req = { .reason_code = reason, .local_state_change = local_state_change, .ie = ie, .ie_len = ie_len, .ap_addr = ap_addr, }; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->connected) return -ENOTCONN; if (memcmp(wdev->u.client.connected_addr, ap_addr, ETH_ALEN)) return -ENOTCONN; err = rdev_disassoc(rdev, dev, &req); if (err) return err; /* driver should have reported the disassoc */ WARN_ON(wdev->connected); return 0; } void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; u8 bssid[ETH_ALEN]; ASSERT_WDEV_LOCK(wdev); if (!rdev->ops->deauth) return; if (!wdev->connected) return; memcpy(bssid, wdev->u.client.connected_addr, ETH_ALEN); cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); } struct cfg80211_mgmt_registration { struct list_head list; struct wireless_dev 
*wdev; u32 nlportid; int match_len; __le16 frame_type; bool multicast_rx; u8 match[]; }; static void cfg80211_mgmt_registrations_update(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct wireless_dev *tmp; struct cfg80211_mgmt_registration *reg; struct mgmt_frame_regs upd = {}; lockdep_assert_held(&rdev->wiphy.mtx); spin_lock_bh(&rdev->mgmt_registrations_lock); if (!wdev->mgmt_registrations_need_update) { spin_unlock_bh(&rdev->mgmt_registrations_lock); return; } rcu_read_lock(); list_for_each_entry_rcu(tmp, &rdev->wiphy.wdev_list, list) { list_for_each_entry(reg, &tmp->mgmt_registrations, list) { u32 mask = BIT(le16_to_cpu(reg->frame_type) >> 4); u32 mcast_mask = 0; if (reg->multicast_rx) mcast_mask = mask; upd.global_stypes |= mask; upd.global_mcast_stypes |= mcast_mask; if (tmp == wdev) { upd.interface_stypes |= mask; upd.interface_mcast_stypes |= mcast_mask; } } } rcu_read_unlock(); wdev->mgmt_registrations_need_update = 0; spin_unlock_bh(&rdev->mgmt_registrations_lock); rdev_update_mgmt_frame_registrations(rdev, wdev, &upd); } void cfg80211_mgmt_registrations_update_wk(struct work_struct *wk) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; rdev = container_of(wk, struct cfg80211_registered_device, mgmt_registrations_update_wk); wiphy_lock(&rdev->wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) cfg80211_mgmt_registrations_update(wdev); wiphy_unlock(&rdev->wiphy); } int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, u16 frame_type, const u8 *match_data, int match_len, bool multicast_rx, struct netlink_ext_ack *extack) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_mgmt_registration *reg, *nreg; int err = 0; u16 mgmt_type; bool update_multicast = false; if (!wdev->wiphy->mgmt_stypes) return -EOPNOTSUPP; if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) { NL_SET_ERR_MSG(extack, "frame type not management"); return -EINVAL; } if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) { NL_SET_ERR_MSG(extack, "Invalid frame type"); return -EINVAL; } mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4; if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type))) { NL_SET_ERR_MSG(extack, "Registration to specific type not supported"); return -EINVAL; } /* * To support Pre Association Security Negotiation (PASN), registration * for authentication frames should be supported. However, as some * versions of the user space daemons wrongly register to all types of * authentication frames (which might result in unexpected behavior) * allow such registration if the request is for a specific * authentication algorithm number. 
*/ if (wdev->iftype == NL80211_IFTYPE_STATION && (frame_type & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_AUTH && !(match_data && match_len >= 2)) { NL_SET_ERR_MSG(extack, "Authentication algorithm number required"); return -EINVAL; } nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL); if (!nreg) return -ENOMEM; spin_lock_bh(&rdev->mgmt_registrations_lock); list_for_each_entry(reg, &wdev->mgmt_registrations, list) { int mlen = min(match_len, reg->match_len); if (frame_type != le16_to_cpu(reg->frame_type)) continue; if (memcmp(reg->match, match_data, mlen) == 0) { if (reg->multicast_rx != multicast_rx) { update_multicast = true; reg->multicast_rx = multicast_rx; break; } NL_SET_ERR_MSG(extack, "Match already configured"); err = -EALREADY; break; } } if (err) goto out; if (update_multicast) { kfree(nreg); } else { memcpy(nreg->match, match_data, match_len); nreg->match_len = match_len; nreg->nlportid = snd_portid; nreg->frame_type = cpu_to_le16(frame_type); nreg->wdev = wdev; nreg->multicast_rx = multicast_rx; list_add(&nreg->list, &wdev->mgmt_registrations); } wdev->mgmt_registrations_need_update = 1; spin_unlock_bh(&rdev->mgmt_registrations_lock); cfg80211_mgmt_registrations_update(wdev); return 0; out: kfree(nreg); spin_unlock_bh(&rdev->mgmt_registrations_lock); return err; } void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_mgmt_registration *reg, *tmp; spin_lock_bh(&rdev->mgmt_registrations_lock); list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { if (reg->nlportid != nlportid) continue; list_del(&reg->list); kfree(reg); wdev->mgmt_registrations_need_update = 1; schedule_work(&rdev->mgmt_registrations_update_wk); } spin_unlock_bh(&rdev->mgmt_registrations_lock); if (nlportid && rdev->crit_proto_nlportid == nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } if (nlportid == wdev->ap_unexpected_nlportid) wdev->ap_unexpected_nlportid = 0; } void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_mgmt_registration *reg, *tmp; spin_lock_bh(&rdev->mgmt_registrations_lock); list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { list_del(&reg->list); kfree(reg); } wdev->mgmt_registrations_need_update = 1; spin_unlock_bh(&rdev->mgmt_registrations_lock); cfg80211_mgmt_registrations_update(wdev); } static bool cfg80211_allowed_address(struct wireless_dev *wdev, const u8 *addr) { int i; for_each_valid_link(wdev, i) { if (ether_addr_equal(addr, wdev->links[i].addr)) return true; } return ether_addr_equal(addr, wdev_address(wdev)); } static bool cfg80211_allowed_random_address(struct wireless_dev *wdev, const struct ieee80211_mgmt *mgmt) { if (ieee80211_is_auth(mgmt->frame_control) || ieee80211_is_deauth(mgmt->frame_control)) { /* Allow random TA to be used with authentication and * deauthentication frames if the driver has indicated support. */ if (wiphy_ext_feature_isset( wdev->wiphy, NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA)) return true; } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { /* Allow random TA to be used with Public Action frames if the * driver has indicated support. 
*/ if (!wdev->connected && wiphy_ext_feature_isset( wdev->wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA)) return true; if (wdev->connected && wiphy_ext_feature_isset( wdev->wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED)) return true; } return false; } int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { const struct ieee80211_mgmt *mgmt; u16 stype; if (!wdev->wiphy->mgmt_stypes) return -EOPNOTSUPP; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; if (params->len < 24 + 1) return -EINVAL; mgmt = (const struct ieee80211_mgmt *)params->buf; if (!ieee80211_is_mgmt(mgmt->frame_control)) return -EINVAL; stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4))) return -EINVAL; if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { int err = 0; wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: /* * check for IBSS DA must be done by driver as * cfg80211 doesn't track the stations */ if (!wdev->u.ibss.current_bss || !ether_addr_equal(wdev->u.ibss.current_bss->pub.bssid, mgmt->bssid)) { err = -ENOTCONN; break; } break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->connected) { err = -ENOTCONN; break; } /* FIXME: MLD may address this differently */ if (!ether_addr_equal(wdev->u.client.connected_addr, mgmt->bssid)) { err = -ENOTCONN; break; } /* for station, check that DA is the AP */ if (!ether_addr_equal(wdev->u.client.connected_addr, mgmt->da)) { err = -ENOTCONN; break; } break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP_VLAN: if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)) && (params->link_id < 0 || !ether_addr_equal(mgmt->bssid, wdev->links[params->link_id].addr))) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) { err = -EINVAL; break; } /* * check for mesh DA must be done by driver as * cfg80211 doesn't track the stations */ break; case NL80211_IFTYPE_P2P_DEVICE: /* * fall through, P2P device only supports * public action frames */ case NL80211_IFTYPE_NAN: default: err = -EOPNOTSUPP; break; } wdev_unlock(wdev); if (err) return err; } if (!cfg80211_allowed_address(wdev, mgmt->sa) && !cfg80211_allowed_random_address(wdev, mgmt)) return -EINVAL; /* Transmit the management frame as requested by user space */ return rdev_mgmt_tx(rdev, wdev, params, cookie); } bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev, struct cfg80211_rx_info *info) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_mgmt_registration *reg; const struct ieee80211_txrx_stypes *stypes = &wiphy->mgmt_stypes[wdev->iftype]; struct ieee80211_mgmt *mgmt = (void *)info->buf; const u8 *data; int data_len; bool result = false; __le16 ftype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); u16 stype; trace_cfg80211_rx_mgmt(wdev, info); stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; if (!(stypes->rx & BIT(stype))) { trace_cfg80211_return_bool(false); return false; } data = info->buf + ieee80211_hdrlen(mgmt->frame_control); data_len = info->len - ieee80211_hdrlen(mgmt->frame_control); spin_lock_bh(&rdev->mgmt_registrations_lock); list_for_each_entry(reg, &wdev->mgmt_registrations, list) { if (reg->frame_type != ftype) continue; if (reg->match_len > data_len) continue; if 
(memcmp(reg->match, data, reg->match_len)) continue; /* found match! */ /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, info, GFP_ATOMIC)) continue; result = true; break; } spin_unlock_bh(&rdev->mgmt_registrations_lock); trace_cfg80211_return_bool(result); return result; } EXPORT_SYMBOL(cfg80211_rx_mgmt_ext); void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev) { cancel_delayed_work(&rdev->dfs_update_channels_wk); queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, 0); } void cfg80211_dfs_channels_update_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct cfg80211_registered_device *rdev; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband; struct ieee80211_channel *c; struct wiphy *wiphy; bool check_again = false; unsigned long timeout, next_time = 0; unsigned long time_dfs_update; enum nl80211_radar_event radar_event; int bandid, i; rdev = container_of(delayed_work, struct cfg80211_registered_device, dfs_update_channels_wk); wiphy = &rdev->wiphy; rtnl_lock(); for (bandid = 0; bandid < NUM_NL80211_BANDS; bandid++) { sband = wiphy->bands[bandid]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { c = &sband->channels[i]; if (!(c->flags & IEEE80211_CHAN_RADAR)) continue; if (c->dfs_state != NL80211_DFS_UNAVAILABLE && c->dfs_state != NL80211_DFS_AVAILABLE) continue; if (c->dfs_state == NL80211_DFS_UNAVAILABLE) { time_dfs_update = IEEE80211_DFS_MIN_NOP_TIME_MS; radar_event = NL80211_RADAR_NOP_FINISHED; } else { if (regulatory_pre_cac_allowed(wiphy) || cfg80211_any_wiphy_oper_chan(wiphy, c)) continue; time_dfs_update = REG_PRE_CAC_EXPIRY_GRACE_MS; radar_event = NL80211_RADAR_PRE_CAC_EXPIRED; } timeout = c->dfs_state_entered + msecs_to_jiffies(time_dfs_update); if (time_after_eq(jiffies, timeout)) { c->dfs_state = NL80211_DFS_USABLE; c->dfs_state_entered = jiffies; cfg80211_chandef_create(&chandef, c, NL80211_CHAN_NO_HT); nl80211_radar_notify(rdev, &chandef, radar_event, NULL, GFP_ATOMIC); regulatory_propagate_dfs_state(wiphy, &chandef, c->dfs_state, radar_event); continue; } if (!check_again) next_time = timeout - jiffies; else next_time = min(next_time, timeout - jiffies); check_again = true; } } rtnl_unlock(); /* reschedule if there are other channels waiting to be cleared again */ if (check_again) queue_delayed_work(cfg80211_wq, &rdev->dfs_update_channels_wk, next_time); } void __cfg80211_radar_event(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, bool offchan, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_radar_event(wiphy, chandef, offchan); /* only set the chandef supplied channel to unavailable, in * case the radar is detected on only one of multiple channels * spanned by the chandef. 
*/ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE); if (offchan) queue_work(cfg80211_wq, &rdev->background_cac_abort_wk); cfg80211_sched_dfs_chan_update(rdev); nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp); memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); } EXPORT_SYMBOL(__cfg80211_radar_event); void cfg80211_cac_event(struct net_device *netdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, gfp_t gfp) { struct wireless_dev *wdev = netdev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long timeout; /* not yet supported */ if (wdev->valid_links) return; trace_cfg80211_cac_event(netdev, event); if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED)) return; switch (event) { case NL80211_RADAR_CAC_FINISHED: timeout = wdev->cac_start_time + msecs_to_jiffies(wdev->cac_time_ms); WARN_ON(!time_after_eq(jiffies, timeout)); cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); memcpy(&rdev->cac_done_chandef, chandef, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); fallthrough; case NL80211_RADAR_CAC_ABORTED: wdev->cac_started = false; break; case NL80211_RADAR_CAC_STARTED: wdev->cac_started = true; break; default: WARN_ON(1); return; } nl80211_radar_notify(rdev, chandef, event, netdev, gfp); } EXPORT_SYMBOL(cfg80211_cac_event); static void __cfg80211_background_cac_event(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event) { struct wiphy *wiphy = &rdev->wiphy; struct net_device *netdev; lockdep_assert_wiphy(&rdev->wiphy); if (!cfg80211_chandef_valid(chandef)) return; if (!rdev->background_radar_wdev) return; switch (event) { case NL80211_RADAR_CAC_FINISHED: cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE); memcpy(&rdev->cac_done_chandef, chandef, sizeof(*chandef)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); wdev = rdev->background_radar_wdev; break; case NL80211_RADAR_CAC_ABORTED: if (!cancel_delayed_work(&rdev->background_cac_done_wk)) return; wdev = rdev->background_radar_wdev; break; case NL80211_RADAR_CAC_STARTED: break; default: return; } netdev = wdev ? 
wdev->netdev : NULL; nl80211_radar_notify(rdev, chandef, event, netdev, GFP_KERNEL); } static void cfg80211_background_cac_event(struct cfg80211_registered_device *rdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event) { wiphy_lock(&rdev->wiphy); __cfg80211_background_cac_event(rdev, rdev->background_radar_wdev, chandef, event); wiphy_unlock(&rdev->wiphy); } void cfg80211_background_cac_done_wk(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct cfg80211_registered_device *rdev; rdev = container_of(delayed_work, struct cfg80211_registered_device, background_cac_done_wk); cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef, NL80211_RADAR_CAC_FINISHED); } void cfg80211_background_cac_abort_wk(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, background_cac_abort_wk); cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef, NL80211_RADAR_CAC_ABORTED); } void cfg80211_background_cac_abort(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); queue_work(cfg80211_wq, &rdev->background_cac_abort_wk); } EXPORT_SYMBOL(cfg80211_background_cac_abort); int cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { unsigned int cac_time_ms; int err; lockdep_assert_wiphy(&rdev->wiphy); if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RADAR_BACKGROUND)) return -EOPNOTSUPP; /* Offchannel chain already locked by another wdev */ if (rdev->background_radar_wdev && rdev->background_radar_wdev != wdev) return -EBUSY; /* CAC already in progress on the offchannel chain */ if (rdev->background_radar_wdev == wdev && delayed_work_pending(&rdev->background_cac_done_wk)) return -EBUSY; err = rdev_set_radar_background(rdev, chandef); if (err) return err; cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, chandef); if (!cac_time_ms) cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; rdev->background_radar_chandef = *chandef; rdev->background_radar_wdev = wdev; /* Get offchain ownership */ __cfg80211_background_cac_event(rdev, wdev, chandef, NL80211_RADAR_CAC_STARTED); queue_delayed_work(cfg80211_wq, &rdev->background_cac_done_wk, msecs_to_jiffies(cac_time_ms)); return 0; } void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); lockdep_assert_wiphy(wiphy); if (wdev != rdev->background_radar_wdev) return; rdev_set_radar_background(rdev, NULL); rdev->background_radar_wdev = NULL; /* Release offchain ownership */ __cfg80211_background_cac_event(rdev, wdev, &rdev->background_radar_chandef, NL80211_RADAR_CAC_ABORTED); }
linux-master
net/wireless/mlme.c
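The management-frame registration and RX paths in mlme.c above share one matching rule; the following self-contained sketch (the example_* names are invented, not kernel API) restates it: the frame type/subtype must be identical, and the registered match bytes must be a byte-wise prefix of the management-frame body that starts right after the 802.11 header.

/*
 * Minimal sketch of the matching rule used by cfg80211_mlme_register_mgmt()
 * and cfg80211_rx_mgmt_ext() above.  Illustrative helper, not kernel code.
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ieee80211.h>

static bool example_mgmt_reg_matches(__le16 reg_frame_type,
				     const u8 *reg_match, size_t reg_match_len,
				     const struct ieee80211_mgmt *mgmt,
				     size_t frame_len)
{
	__le16 ftype = mgmt->frame_control &
		       cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	unsigned int hdrlen = ieee80211_hdrlen(mgmt->frame_control);
	const u8 *body = (const u8 *)mgmt + hdrlen;
	size_t body_len = frame_len - hdrlen;

	/* frame type/subtype must match the registration exactly */
	if (reg_frame_type != ftype)
		return false;
	/* registered prefix cannot be longer than the frame body */
	if (reg_match_len > body_len)
		return false;
	/* e.g. for Action frames the first matched byte is the category */
	return memcmp(reg_match, body, reg_match_len) == 0;
}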
/* * This file implement the Wireless Extensions core API. * * Authors : Jean Tourrilhes - HPL - <[email protected]> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * Copyright 2009 Johannes Berg <[email protected]> * * (As all part of the Linux kernel, this file is GPL) */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/wireless.h> #include <linux/uaccess.h> #include <linux/export.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include <net/netlink.h> #include <net/wext.h> #include <net/net_namespace.h> typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); /* * Meta-data about all the standard Wireless Extension request we * know about. */ static const struct iw_ioctl_description standard_ioctl[] = { [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWNAME)] = { .header_type = IW_HEADER_TYPE_CHAR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRANGE)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWRANGE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_range), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWPRIV)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_priv_args), .max_tokens = 16, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSTATS)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_statistics), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCGIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCSIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_IOCTL_IDX(SIOCGIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMLME)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_mlme), .max_tokens = sizeof(struct iw_mlme), }, [IW_IOCTL_IDX(SIOCGIWAPLIST)] = { .header_type = 
IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_AP, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = 0, .max_tokens = sizeof(struct iw_scan_req), }, [IW_IOCTL_IDX(SIOCGIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_SCAN_MAX_DATA, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCGIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCSIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCGIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCSIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCGIWGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCSIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCSIWPMKSA)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_pmksa), .max_tokens = sizeof(struct iw_pmksa), }, }; static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl); /* * Meta-data about all the additional standard Wireless Extension events * we know about. 
*/ static const struct iw_ioctl_description standard_event[] = { [IW_EVENT_IDX(IWEVTXDROP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVQUAL)] = { .header_type = IW_HEADER_TYPE_QUAL, }, [IW_EVENT_IDX(IWEVCUSTOM)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_CUSTOM_MAX, }, [IW_EVENT_IDX(IWEVREGISTERED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVEXPIRED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_michaelmicfailure), }, [IW_EVENT_IDX(IWEVASSOCREQIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVASSOCRESPIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVPMKIDCAND)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_pmkid_cand), }, }; static const unsigned int standard_event_num = ARRAY_SIZE(standard_event); /* Size (in bytes) of various events */ static const int event_type_size[] = { IW_EV_LCP_LEN, /* IW_HEADER_TYPE_NULL */ 0, IW_EV_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ 0, IW_EV_UINT_LEN, /* IW_HEADER_TYPE_UINT */ IW_EV_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ IW_EV_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ 0, IW_EV_POINT_LEN, /* Without variable payload */ IW_EV_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ }; #ifdef CONFIG_COMPAT static const int compat_event_type_size[] = { IW_EV_COMPAT_LCP_LEN, /* IW_HEADER_TYPE_NULL */ 0, IW_EV_COMPAT_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */ 0, IW_EV_COMPAT_UINT_LEN, /* IW_HEADER_TYPE_UINT */ IW_EV_COMPAT_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */ IW_EV_COMPAT_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */ 0, IW_EV_COMPAT_POINT_LEN, /* Without variable payload */ IW_EV_COMPAT_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */ IW_EV_COMPAT_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */ }; #endif /* IW event code */ void wireless_nlevent_flush(void) { struct sk_buff *skb; struct net *net; down_read(&net_rwsem); for_each_net(net) { while ((skb = skb_dequeue(&net->wext_nlevents))) rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL); } up_read(&net_rwsem); } EXPORT_SYMBOL_GPL(wireless_nlevent_flush); static int wext_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { /* * When a netdev changes state in any way, flush all pending messages * to avoid them going out in a strange order, e.g. RTM_NEWLINK after * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close() * or similar - all of which could otherwise happen due to delays from * schedule_work(). 
*/ wireless_nlevent_flush(); return NOTIFY_OK; } static struct notifier_block wext_netdev_notifier = { .notifier_call = wext_netdev_notifier_call, }; static int __net_init wext_pernet_init(struct net *net) { skb_queue_head_init(&net->wext_nlevents); return 0; } static void __net_exit wext_pernet_exit(struct net *net) { skb_queue_purge(&net->wext_nlevents); } static struct pernet_operations wext_pernet_ops = { .init = wext_pernet_init, .exit = wext_pernet_exit, }; static int __init wireless_nlevent_init(void) { int err = register_pernet_subsys(&wext_pernet_ops); if (err) return err; err = register_netdevice_notifier(&wext_netdev_notifier); if (err) unregister_pernet_subsys(&wext_pernet_ops); return err; } subsys_initcall(wireless_nlevent_init); /* Process events generated by the wireless layer or the driver. */ static void wireless_nlevent_process(struct work_struct *work) { wireless_nlevent_flush(); } static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process); static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev, struct sk_buff *skb) { struct ifinfomsg *r; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0); if (!nlh) return NULL; r = nlmsg_data(nlh); r->ifi_family = AF_UNSPEC; r->__ifi_pad = 0; r->ifi_type = dev->type; r->ifi_index = dev->ifindex; r->ifi_flags = dev_get_flags(dev); r->ifi_change = 0; /* Wireless changes don't affect those flags */ if (nla_put_string(skb, IFLA_IFNAME, dev->name)) goto nla_put_failure; return nlh; nla_put_failure: nlmsg_cancel(skb, nlh); return NULL; } /* * Main event dispatcher. Called from other parts and drivers. * Send the event on the appropriate channels. * May be called from interrupt context. */ void wireless_send_event(struct net_device * dev, unsigned int cmd, union iwreq_data * wrqu, const char * extra) { const struct iw_ioctl_description * descr = NULL; int extra_len = 0; struct iw_event *event; /* Mallocated whole event */ int event_len; /* Its size */ int hdr_len; /* Size of the event header */ int wrqu_off = 0; /* Offset in wrqu */ /* Don't "optimise" the following variable, it will crash */ unsigned int cmd_index; /* *MUST* be unsigned */ struct sk_buff *skb; struct nlmsghdr *nlh; struct nlattr *nla; #ifdef CONFIG_COMPAT struct __compat_iw_event *compat_event; struct compat_iw_point compat_wrqu; struct sk_buff *compskb; int ptr_len; #endif /* * Nothing in the kernel sends scan events with data, be safe. * This is necessary because we cannot fix up scan event data * for compat, due to being contained in 'extra', but normally * applications are required to retrieve the scan data anyway * and no data is included in the event, this codifies that * practice. */ if (WARN_ON(cmd == SIOCGIWSCAN && extra)) extra = NULL; /* Get the description of the Event */ if (cmd <= SIOCIWLAST) { cmd_index = IW_IOCTL_IDX(cmd); if (cmd_index < standard_ioctl_num) descr = &(standard_ioctl[cmd_index]); } else { cmd_index = IW_EVENT_IDX(cmd); if (cmd_index < standard_event_num) descr = &(standard_event[cmd_index]); } /* Don't accept unknown events */ if (descr == NULL) { /* Note : we don't return an error to the driver, because * the driver would not know what to do about it. It can't * return an error to the user, because the event is not * initiated by a user request. * The best the driver could do is to log an error message. * We will do it ourselves instead... 
*/ netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n", cmd); return; } /* Check extra parameters and set extra_len */ if (descr->header_type == IW_HEADER_TYPE_POINT) { /* Check if number of token fits within bounds */ if (wrqu->data.length > descr->max_tokens) { netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n", cmd, wrqu->data.length); return; } if (wrqu->data.length < descr->min_tokens) { netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n", cmd, wrqu->data.length); return; } /* Calculate extra_len - extra is NULL for restricted events */ if (extra != NULL) extra_len = wrqu->data.length * descr->token_size; /* Always at an offset in wrqu */ wrqu_off = IW_EV_POINT_OFF; } /* Total length of the event */ hdr_len = event_type_size[descr->header_type]; event_len = hdr_len + extra_len; /* * The problem for 64/32 bit. * * On 64-bit, a regular event is laid out as follows: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | wrqu data ... (with the correct size) | * * This padding exists because we manipulate event->u, * and 'event' is not packed. * * An iw_point event is laid out like this instead: * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | * | event.len | event.cmd | p a d d i n g | * | iwpnt.len | iwpnt.flg | p a d d i n g | * | extra data ... * * The second padding exists because struct iw_point is extended, * but this depends on the platform... * * On 32-bit, all the padding shouldn't be there. */ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, skb); if (WARN_ON(!nlh)) { kfree_skb(skb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(skb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); return; } event = nla_data(nla); /* Fill event - first clear to avoid data leaking */ memset(event, 0, hdr_len); event->len = event_len; event->cmd = cmd; memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN); if (extra_len) memcpy(((char *) event) + hdr_len, extra, extra_len); nlmsg_end(skb, nlh); #ifdef CONFIG_COMPAT hdr_len = compat_event_type_size[descr->header_type]; /* ptr_len is remaining size in event header apart from LCP */ ptr_len = hdr_len - IW_EV_COMPAT_LCP_LEN; event_len = hdr_len + extra_len; compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!compskb) { kfree_skb(skb); return; } /* Send via the RtNetlink event channel */ nlh = rtnetlink_ifinfo_prep(dev, compskb); if (WARN_ON(!nlh)) { kfree_skb(skb); kfree_skb(compskb); return; } /* Add the wireless events in the netlink packet */ nla = nla_reserve(compskb, IFLA_WIRELESS, event_len); if (!nla) { kfree_skb(skb); kfree_skb(compskb); return; } compat_event = nla_data(nla); compat_event->len = event_len; compat_event->cmd = cmd; if (descr->header_type == IW_HEADER_TYPE_POINT) { compat_wrqu.length = wrqu->data.length; compat_wrqu.flags = wrqu->data.flags; memcpy(compat_event->ptr_bytes, ((char *)&compat_wrqu) + IW_EV_COMPAT_POINT_OFF, ptr_len); if (extra_len) memcpy(&compat_event->ptr_bytes[ptr_len], extra, extra_len); } else { /* extra_len must be zero, so no if (extra) needed */ memcpy(compat_event->ptr_bytes, wrqu, ptr_len); } nlmsg_end(compskb, nlh); skb_shinfo(skb)->frag_list = compskb; #endif skb_queue_tail(&dev_net(dev)->wext_nlevents, skb); schedule_work(&wireless_nlevent_work); } EXPORT_SYMBOL(wireless_send_event); #ifdef CONFIG_CFG80211_WEXT static void wireless_warn_cfg80211_wext(void) { char 
name[sizeof(current->comm)]; pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n", get_task_comm(name, current)); } #endif /* IW handlers */ struct iw_statistics *get_wireless_stats(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if ((dev->wireless_handlers != NULL) && (dev->wireless_handlers->get_wireless_stats != NULL)) return dev->wireless_handlers->get_wireless_stats(dev); #endif #ifdef CONFIG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy && dev->ieee80211_ptr->wiphy->wext && dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) { wireless_warn_cfg80211_wext(); if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) return NULL; return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); } #endif /* not found */ return NULL; } /* noinline to avoid a bogus warning with -O3 */ static noinline int iw_handler_get_iwstats(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { /* Get stats from the driver */ struct iw_statistics *stats; stats = get_wireless_stats(dev); if (stats) { /* Copy statistics to extra */ memcpy(extra, stats, sizeof(struct iw_statistics)); wrqu->data.length = sizeof(struct iw_statistics); /* Check if we need to clear the updated flag */ if (wrqu->data.flags != 0) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; return 0; } else return -EOPNOTSUPP; } static iw_handler get_handler(struct net_device *dev, unsigned int cmd) { /* Don't "optimise" the following variable, it will crash */ unsigned int index; /* *MUST* be unsigned */ const struct iw_handler_def *handlers = NULL; #ifdef CONFIG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) { wireless_warn_cfg80211_wext(); if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) return NULL; handlers = dev->ieee80211_ptr->wiphy->wext; } #endif #ifdef CONFIG_WIRELESS_EXT if (dev->wireless_handlers) handlers = dev->wireless_handlers; #endif if (!handlers) return NULL; /* Try as a standard command */ index = IW_IOCTL_IDX(cmd); if (index < handlers->num_standard) return handlers->standard[index]; #ifdef CONFIG_WEXT_PRIV /* Try as a private command */ index = cmd - SIOCIWFIRSTPRIV; if (index < handlers->num_private) return handlers->private[index]; #endif /* Not found */ return NULL; } static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, const struct iw_ioctl_description *descr, iw_handler handler, struct net_device *dev, struct iw_request_info *info) { int err, extra_size, user_length = 0, essid_compat = 0; char *extra; /* Calculate space needed by arguments. Always allocate * for max space. 
*/ extra_size = descr->max_tokens * descr->token_size; /* Check need for ESSID compatibility for WE < 21 */ switch (cmd) { case SIOCSIWESSID: case SIOCGIWESSID: case SIOCSIWNICKN: case SIOCGIWNICKN: if (iwp->length == descr->max_tokens + 1) essid_compat = 1; else if (IW_IS_SET(cmd) && (iwp->length != 0)) { char essid[IW_ESSID_MAX_SIZE + 1]; unsigned int len; len = iwp->length * descr->token_size; if (len > IW_ESSID_MAX_SIZE) return -EFAULT; err = copy_from_user(essid, iwp->pointer, len); if (err) return -EFAULT; if (essid[iwp->length - 1] == '\0') essid_compat = 1; } break; default: break; } iwp->length -= essid_compat; /* Check what user space is giving us */ if (IW_IS_SET(cmd)) { /* Check NULL pointer */ if (!iwp->pointer && iwp->length != 0) return -EFAULT; /* Check if number of token fits within bounds */ if (iwp->length > descr->max_tokens) return -E2BIG; if (iwp->length < descr->min_tokens) return -EINVAL; } else { /* Check NULL pointer */ if (!iwp->pointer) return -EFAULT; /* Save user space buffer size for checking */ user_length = iwp->length; /* Don't check if user_length > max to allow forward * compatibility. The test user_length < min is * implied by the test at the end. */ /* Support for very large requests */ if ((descr->flags & IW_DESCR_FLAG_NOMAX) && (user_length > descr->max_tokens)) { /* Allow userspace to GET more than max so * we can support any size GET requests. * There is still a limit : -ENOMEM. */ extra_size = user_length * descr->token_size; /* Note : user_length is originally a __u16, * and token_size is controlled by us, * so extra_size won't get negative and * won't overflow... */ } } /* Sanity-check to ensure we never end up _allocating_ zero * bytes of data for extra. */ if (extra_size <= 0) return -EFAULT; /* kzalloc() ensures NULL-termination for essid_compat. */ extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) return -ENOMEM; /* If it is a SET, get all the extra data in here */ if (IW_IS_SET(cmd) && (iwp->length != 0)) { if (copy_from_user(extra, iwp->pointer, iwp->length * descr->token_size)) { err = -EFAULT; goto out; } if (cmd == SIOCSIWENCODEEXT) { struct iw_encode_ext *ee = (void *) extra; if (iwp->length < sizeof(*ee) + ee->key_len) { err = -EFAULT; goto out; } } } if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { /* * If this is a GET, but not NOMAX, it means that the extra * data is not bounded by userspace, but by max_tokens. Thus * set the length to max_tokens. This matches the extra data * allocation. * The driver should fill it with the number of tokens it * provided, and it may check iwp->length rather than having * knowledge of max_tokens. If the driver doesn't change the * iwp->length, this ioctl just copies back max_token tokens * filled with zeroes. Hopefully the driver isn't claiming * them to be valid data. */ iwp->length = descr->max_tokens; } err = handler(dev, info, (union iwreq_data *) iwp, extra); iwp->length += essid_compat; /* If we have something to return to the user */ if (!err && IW_IS_GET(cmd)) { /* Check if there is enough buffer up there */ if (user_length < iwp->length) { err = -E2BIG; goto out; } if (copy_to_user(iwp->pointer, extra, iwp->length * descr->token_size)) { err = -EFAULT; goto out; } } /* Generate an event to notify listeners of the change */ if ((descr->flags & IW_DESCR_FLAG_EVENT) && ((err == 0) || (err == -EIWCOMMIT))) { union iwreq_data *data = (union iwreq_data *) iwp; if (descr->flags & IW_DESCR_FLAG_RESTRICT) /* If the event is restricted, don't * export the payload. 
*/ wireless_send_event(dev, cmd, data, NULL); else wireless_send_event(dev, cmd, data, extra); } out: kfree(extra); return err; } /* * Call the commit handler in the driver * (if it exists and conditions are right) * * Note : our current commit strategy is pretty dumb, * but we will be able to improve on that... * The goal is to try to aggregate as many changes as possible * before doing the commit. Drivers that will define a commit handler * are usually those that need a reset after changing parameters, so * we want to minimise the number of resets. * A cool idea is to use a timer : at each "set" command, we re-set the * timer; when the timer eventually fires, we call the driver. * Hopefully, more on that later. * * Also, I'm waiting to see how many people will complain about the * netif_running(dev) test. I'm open on that one... * Hopefully, the driver will remember to do a commit in "open()" ;-) */ int call_commit_handler(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if (netif_running(dev) && dev->wireless_handlers && dev->wireless_handlers->standard[0]) /* Call the commit handler on the driver */ return dev->wireless_handlers->standard[0](dev, NULL, NULL, NULL); else return 0; /* Command completed successfully */ #else /* cfg80211 has no commit */ return 0; #endif } /* * Main IOCTL dispatcher. * Check the type of IOCTL and call the appropriate wrapper... */ static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { struct net_device *dev; iw_handler handler; /* Permissions are already checked in dev_ioctl() before calling us. * The copy_to/from_user() of ifr is also dealt with in there */ /* Make sure the device exists */ if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) return -ENODEV; /* A bunch of special cases, then the generic case... * Note that 'cmd' is already filtered in dev_ioctl() with * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ if (cmd == SIOCGIWSTATS) return standard(dev, iwr, cmd, info, &iw_handler_get_iwstats); #ifdef CONFIG_WEXT_PRIV if (cmd == SIOCGIWPRIV && dev->wireless_handlers) return standard(dev, iwr, cmd, info, iw_handler_get_private); #endif /* Basic check */ if (!netif_device_present(dev)) return -ENODEV; /* New driver API : try to find the handler */ handler = get_handler(dev, cmd); if (handler) { /* Standard and private are not the same */ if (cmd < SIOCIWFIRSTPRIV) return standard(dev, iwr, cmd, info, handler); else if (private) return private(dev, iwr, cmd, info, handler); } return -EOPNOTSUPP; } /* If command is `set a parameter', or `get the encoding parameters', * check if the user has the right to do it. */ static int wext_permission_check(unsigned int cmd) { if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) && !capable(CAP_NET_ADMIN)) return -EPERM; return 0; } /* entry point from dev ioctl */ static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { int ret = wext_permission_check(cmd); if (ret) return ret; dev_load(net, iwr->ifr_name); rtnl_lock(); ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); rtnl_unlock(); return ret; } /* * Wrapper to call a standard Wireless Extension handler. * We do various checks and also take care of moving data between * user space and kernel space. 
*/ static int ioctl_standard_call(struct net_device * dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description * descr; int ret = -EINVAL; /* Get the description of the IOCTL */ if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num) return -EOPNOTSUPP; descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]); /* Check if we have a pointer to user space data or not */ if (descr->header_type != IW_HEADER_TYPE_POINT) { /* No extra arguments. Trivial to handle */ ret = handler(dev, info, &(iwr->u), NULL); /* Generate an event to notify listeners of the change */ if ((descr->flags & IW_DESCR_FLAG_EVENT) && ((ret == 0) || (ret == -EIWCOMMIT))) wireless_send_event(dev, cmd, &(iwr->u), NULL); } else { ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, handler, dev, info); } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); /* Here, we will generate the appropriate event if needed */ return ret; } int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg) { struct iw_request_info info = { .cmd = cmd, .flags = 0 }; struct iwreq iwr; int ret; if (copy_from_user(&iwr, arg, sizeof(iwr))) return -EFAULT; iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0; ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, ioctl_standard_call, ioctl_private_call); if (ret >= 0 && IW_IS_GET(cmd) && copy_to_user(arg, &iwr, sizeof(struct iwreq))) return -EFAULT; return ret; } #ifdef CONFIG_COMPAT static int compat_standard_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description *descr; struct compat_iw_point *iwp_compat; struct iw_point iwp; int err; descr = standard_ioctl + IW_IOCTL_IDX(cmd); if (descr->header_type != IW_HEADER_TYPE_POINT) return ioctl_standard_call(dev, iwr, cmd, info, handler); iwp_compat = (struct compat_iw_point *) &iwr->u.data; iwp.pointer = compat_ptr(iwp_compat->pointer); iwp.length = iwp_compat->length; iwp.flags = iwp_compat->flags; err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info); iwp_compat->pointer = ptr_to_compat(iwp.pointer); iwp_compat->length = iwp.length; iwp_compat->flags = iwp.flags; return err; } int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct iw_request_info info; struct iwreq iwr; char *colon; int ret; if (copy_from_user(&iwr, argp, sizeof(struct iwreq))) return -EFAULT; iwr.ifr_name[IFNAMSIZ-1] = 0; colon = strchr(iwr.ifr_name, ':'); if (colon) *colon = 0; info.cmd = cmd; info.flags = IW_REQUEST_FLAG_COMPAT; ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, compat_standard_call, compat_private_call); if (ret >= 0 && IW_IS_GET(cmd) && copy_to_user(argp, &iwr, sizeof(struct iwreq))) return -EFAULT; return ret; } #endif char *iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, int event_len) { int lcp_len = iwe_stream_lcp_len(info); event_len = iwe_stream_event_len_adjust(info, event_len); /* Check if it's possible */ if (likely((stream + event_len) < ends)) { iwe->len = event_len; /* Beware of alignement issues on 64 bits */ memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); memcpy(stream + lcp_len, &iwe->u, event_len - lcp_len); stream += event_len; } return stream; } EXPORT_SYMBOL(iwe_stream_add_event); char *iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, struct iw_event *iwe, char *extra) { int event_len 
= iwe_stream_point_len(info) + iwe->u.data.length; int point_len = iwe_stream_point_len(info); int lcp_len = iwe_stream_lcp_len(info); /* Check if it's possible */ if (likely((stream + event_len) < ends)) { iwe->len = event_len; memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN); memcpy(stream + lcp_len, ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); if (iwe->u.data.length && extra) memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; } return stream; } EXPORT_SYMBOL(iwe_stream_add_point); char *iwe_stream_add_value(struct iw_request_info *info, char *event, char *value, char *ends, struct iw_event *iwe, int event_len) { int lcp_len = iwe_stream_lcp_len(info); /* Don't duplicate LCP */ event_len -= IW_EV_LCP_LEN; /* Check if it's possible */ if (likely((value + event_len) < ends)) { /* Add new value */ memcpy(value, &iwe->u, event_len); value += event_len; /* Patch LCP */ iwe->len = value - event; memcpy(event, (char *) iwe, lcp_len); } return value; } EXPORT_SYMBOL(iwe_stream_add_value);
linux-master
net/wireless/wext-core.c
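The iwe_stream_add_event()/iwe_stream_add_point() helpers exported above are the building blocks a WEXT driver uses to serialise scan results into the user-supplied event buffer. Below is a minimal, illustrative sketch of the usual calling pattern; my_fill_scan_entry() and its parameters are hypothetical, while the helpers, structures and the SIOCGIWAP/SIOCGIWESSID constants come from <net/iw_handler.h> and <linux/wireless.h>.

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/iw_handler.h>

/* Illustrative only: append one scan result (BSSID + ESSID) to 'stream'. */
static char *my_fill_scan_entry(struct iw_request_info *info,
				char *stream, char *ends,
				const u8 *bssid, const u8 *ssid,
				size_t ssid_len)
{
	struct iw_event iwe;

	/* BSSID: fixed-size event, silently skipped if it does not fit. */
	memset(&iwe, 0, sizeof(iwe));
	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
	stream = iwe_stream_add_event(info, stream, ends, &iwe, IW_EV_ADDR_LEN);

	/* ESSID: variable-length payload goes through the "point" variant. */
	memset(&iwe, 0, sizeof(iwe));
	iwe.cmd = SIOCGIWESSID;
	iwe.u.data.length = ssid_len;
	iwe.u.data.flags = 1;
	stream = iwe_stream_add_point(info, stream, ends, &iwe, (char *)ssid);

	/* The caller compares the returned pointer against 'ends' to decide
	 * whether to report -E2BIG to user space. */
	return stream;
}
/* end of illustrative sketch */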
// SPDX-License-Identifier: GPL-2.0 /* * Parts of this file are * Copyright (C) 2022 Intel Corporation */ #include <linux/ieee80211.h> #include <linux/export.h> #include <net/cfg80211.h> #include "nl80211.h" #include "core.h" #include "rdev-ops.h" static int ___cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, bool notify) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (!rdev->ops->stop_ap) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!wdev->links[link_id].ap.beacon_interval) return -ENOENT; err = rdev_stop_ap(rdev, dev, link_id); if (!err) { wdev->conn_owner_nlportid = 0; wdev->links[link_id].ap.beacon_interval = 0; memset(&wdev->links[link_id].ap.chandef, 0, sizeof(wdev->links[link_id].ap.chandef)); wdev->u.ap.ssid_len = 0; rdev_set_qos_map(rdev, dev, NULL); if (notify) nl80211_send_ap_stopped(wdev, link_id); /* Should we apply the grace period during beaconing interface * shutdown also? */ cfg80211_sched_dfs_chan_update(rdev); } schedule_work(&cfg80211_disconnect_work); return err; } int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, int link_id, bool notify) { unsigned int link; int ret = 0; if (link_id >= 0) return ___cfg80211_stop_ap(rdev, dev, link_id, notify); for_each_valid_link(dev->ieee80211_ptr, link) { int ret1 = ___cfg80211_stop_ap(rdev, dev, link, notify); if (ret1) ret = ret1; /* try the next one also if one errored */ } return ret; } int cfg80211_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, int link_id, bool notify) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_stop_ap(rdev, dev, link_id, notify); wdev_unlock(wdev); return err; }
linux-master
net/wireless/ap.c
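For context on the stop path above: rdev_stop_ap() ends up in the driver's cfg80211_ops::stop_ap callback. The sketch below shows a hypothetical single-link driver implementation, assuming the MLO-era callback signature that receives a link_id (matching the rdev_stop_ap() call in ___cfg80211_stop_ap()); my_priv, my_stop_ap and the hardware comment are made up, only the cfg80211_ops hook itself is real.

#include <net/cfg80211.h>

/* Hypothetical per-netdev driver state. */
struct my_priv {
	bool beaconing;
};

/* Invoked by cfg80211 when ___cfg80211_stop_ap() calls rdev_stop_ap();
 * a driver without MLO support only ever sees link_id == 0. */
static int my_stop_ap(struct wiphy *wiphy, struct net_device *dev,
		      unsigned int link_id)
{
	struct my_priv *priv = netdev_priv(dev);

	priv->beaconing = false;
	/* ...tell the hardware to stop beaconing and flush its queues... */
	return 0;
}

static const struct cfg80211_ops my_cfg80211_ops = {
	.stop_ap = my_stop_ap,
	/* other mandatory callbacks omitted from this sketch */
};
/* end of illustrative sketch */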
// SPDX-License-Identifier: GPL-2.0-only /* * This is the new netlink-based wireless configuration interface. * * Copyright 2006-2010 Johannes Berg <[email protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2023 Intel Corporation */ #include <linux/if.h> #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/if_ether.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <linux/netlink.h> #include <linux/nospec.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/genetlink.h> #include <net/cfg80211.h> #include <net/sock.h> #include <net/inet_connection_sock.h> #include "core.h" #include "nl80211.h" #include "reg.h" #include "rdev-ops.h" static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_crypto_settings *settings, int cipher_limit); /* the netlink family */ static struct genl_family nl80211_fam; /* multicast groups */ enum nl80211_multicast_groups { NL80211_MCGRP_CONFIG, NL80211_MCGRP_SCAN, NL80211_MCGRP_REGULATORY, NL80211_MCGRP_MLME, NL80211_MCGRP_VENDOR, NL80211_MCGRP_NAN, NL80211_MCGRP_TESTMODE /* keep last - ifdef! */ }; static const struct genl_multicast_group nl80211_mcgrps[] = { [NL80211_MCGRP_CONFIG] = { .name = NL80211_MULTICAST_GROUP_CONFIG }, [NL80211_MCGRP_SCAN] = { .name = NL80211_MULTICAST_GROUP_SCAN }, [NL80211_MCGRP_REGULATORY] = { .name = NL80211_MULTICAST_GROUP_REG }, [NL80211_MCGRP_MLME] = { .name = NL80211_MULTICAST_GROUP_MLME }, [NL80211_MCGRP_VENDOR] = { .name = NL80211_MULTICAST_GROUP_VENDOR }, [NL80211_MCGRP_NAN] = { .name = NL80211_MULTICAST_GROUP_NAN }, #ifdef CONFIG_NL80211_TESTMODE [NL80211_MCGRP_TESTMODE] = { .name = NL80211_MULTICAST_GROUP_TESTMODE } #endif }; /* returns ERR_PTR values */ static struct wireless_dev * __cfg80211_wdev_from_attrs(struct cfg80211_registered_device *rdev, struct net *netns, struct nlattr **attrs) { struct wireless_dev *result = NULL; bool have_ifidx = attrs[NL80211_ATTR_IFINDEX]; bool have_wdev_id = attrs[NL80211_ATTR_WDEV]; u64 wdev_id = 0; int wiphy_idx = -1; int ifidx = -1; if (!have_ifidx && !have_wdev_id) return ERR_PTR(-EINVAL); if (have_ifidx) ifidx = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); if (have_wdev_id) { wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]); wiphy_idx = wdev_id >> 32; } if (rdev) { struct wireless_dev *wdev; lockdep_assert_held(&rdev->wiphy.mtx); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { result = wdev; break; } if (have_wdev_id && wdev->identifier == (u32)wdev_id) { result = wdev; break; } } return result ?: ERR_PTR(-ENODEV); } ASSERT_RTNL(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { struct wireless_dev *wdev; if (wiphy_net(&rdev->wiphy) != netns) continue; if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { result = wdev; break; } if (have_wdev_id && wdev->identifier == (u32)wdev_id) { result = wdev; break; } } if (result) break; } if (result) return result; return ERR_PTR(-ENODEV); } static struct cfg80211_registered_device * __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs) { struct cfg80211_registered_device *rdev = NULL, *tmp; struct net_device *netdev; 
ASSERT_RTNL(); if (!attrs[NL80211_ATTR_WIPHY] && !attrs[NL80211_ATTR_IFINDEX] && !attrs[NL80211_ATTR_WDEV]) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_WIPHY]) rdev = cfg80211_rdev_by_wiphy_idx( nla_get_u32(attrs[NL80211_ATTR_WIPHY])); if (attrs[NL80211_ATTR_WDEV]) { u64 wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]); struct wireless_dev *wdev; bool found = false; tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; break; } if (!found) tmp = NULL; if (rdev && tmp != rdev) return ERR_PTR(-EINVAL); rdev = tmp; } } if (attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(netns, ifindex); if (netdev) { if (netdev->ieee80211_ptr) tmp = wiphy_to_rdev( netdev->ieee80211_ptr->wiphy); else tmp = NULL; /* not wireless device -- return error */ if (!tmp) return ERR_PTR(-EINVAL); /* mismatch -- return error */ if (rdev && tmp != rdev) return ERR_PTR(-EINVAL); rdev = tmp; } } if (!rdev) return ERR_PTR(-ENODEV); if (netns != wiphy_net(&rdev->wiphy)) return ERR_PTR(-ENODEV); return rdev; } /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. * * The result of this can be a PTR_ERR and hence must * be checked with IS_ERR() for errors. */ static struct cfg80211_registered_device * cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) { return __cfg80211_rdev_from_attrs(netns, info->attrs); } static int validate_beacon_head(const struct nlattr *attr, struct netlink_ext_ack *extack) { const u8 *data = nla_data(attr); unsigned int len = nla_len(attr); const struct element *elem; const struct ieee80211_mgmt *mgmt = (void *)data; unsigned int fixedlen, hdrlen; bool s1g_bcn; if (len < offsetofend(typeof(*mgmt), frame_control)) goto err; s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control); if (s1g_bcn) { fixedlen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable); hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon); } else { fixedlen = offsetof(struct ieee80211_mgmt, u.beacon.variable); hdrlen = offsetof(struct ieee80211_mgmt, u.beacon); } if (len < fixedlen) goto err; if (ieee80211_hdrlen(mgmt->frame_control) != hdrlen) goto err; data += fixedlen; len -= fixedlen; for_each_element(elem, data, len) { /* nothing */ } if (for_each_element_completed(elem, data, len)) return 0; err: NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head"); return -EINVAL; } static int validate_ie_attr(const struct nlattr *attr, struct netlink_ext_ack *extack) { const u8 *data = nla_data(attr); unsigned int len = nla_len(attr); const struct element *elem; for_each_element(elem, data, len) { /* nothing */ } if (for_each_element_completed(elem, data, len)) return 0; NL_SET_ERR_MSG_ATTR(extack, attr, "malformed information elements"); return -EINVAL; } static int validate_he_capa(const struct nlattr *attr, struct netlink_ext_ack *extack) { if (!ieee80211_he_capa_size_ok(nla_data(attr), nla_len(attr))) return -EINVAL; return 0; } /* policy for the attributes */ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR]; static const struct nla_policy nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = { [NL80211_FTM_RESP_ATTR_ENABLED] = { .type = NLA_FLAG, }, [NL80211_FTM_RESP_ATTR_LCI] = { .type = NLA_BINARY, .len = U8_MAX }, [NL80211_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_BINARY, .len = U8_MAX }, }; static const struct 
nla_policy nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = NLA_POLICY_MAX(NLA_U8, 31), [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 }, }; static const struct nla_policy nl80211_pmsr_req_data_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(nl80211_pmsr_ftm_req_attr_policy), }; static const struct nla_policy nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_REQ_ATTR_DATA] = NLA_POLICY_NESTED(nl80211_pmsr_req_data_policy), [NL80211_PMSR_REQ_ATTR_GET_AP_TSF] = { .type = NLA_FLAG }, }; static const struct nla_policy nl80211_pmsr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = { [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR, [NL80211_PMSR_PEER_ATTR_CHAN] = NLA_POLICY_NESTED(nl80211_policy), [NL80211_PMSR_PEER_ATTR_REQ] = NLA_POLICY_NESTED(nl80211_pmsr_req_attr_policy), [NL80211_PMSR_PEER_ATTR_RESP] = { .type = NLA_REJECT }, }; static const struct nla_policy nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_PEERS] = NLA_POLICY_NESTED_ARRAY(nl80211_pmsr_peer_attr_policy), }; static const struct nla_policy he_obss_pd_policy[NL80211_HE_OBSS_PD_ATTR_MAX + 1] = { [NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] = NLA_POLICY_RANGE(NLA_U8, 1, 20), [NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET] = NLA_POLICY_RANGE(NLA_U8, 1, 20), [NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET] = NLA_POLICY_RANGE(NLA_U8, 1, 20), [NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP] = NLA_POLICY_EXACT_LEN(8), [NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP] = NLA_POLICY_EXACT_LEN(8), [NL80211_HE_OBSS_PD_ATTR_SR_CTRL] = { .type = NLA_U8 }, }; static const struct nla_policy he_bss_color_policy[NL80211_HE_BSS_COLOR_ATTR_MAX + 1] = { [NL80211_HE_BSS_COLOR_ATTR_COLOR] = NLA_POLICY_RANGE(NLA_U8, 1, 63), [NL80211_HE_BSS_COLOR_ATTR_DISABLED] = { .type = NLA_FLAG }, [NL80211_HE_BSS_COLOR_ATTR_PARTIAL] = { .type = NLA_FLAG }, }; static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = { [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_TXRATE_HT] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_HT_RATES }, [NL80211_TXRATE_VHT] = NLA_POLICY_EXACT_LEN_WARN(sizeof(struct nl80211_txrate_vht)), [NL80211_TXRATE_GI] = { .type = NLA_U8 }, [NL80211_TXRATE_HE] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_he)), [NL80211_TXRATE_HE_GI] = NLA_POLICY_RANGE(NLA_U8, NL80211_RATE_INFO_HE_GI_0_8, NL80211_RATE_INFO_HE_GI_3_2), [NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8, NL80211_RATE_INFO_HE_1XLTF, 
NL80211_RATE_INFO_HE_4XLTF), }; static const struct nla_policy nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = { [NL80211_TID_CONFIG_ATTR_VIF_SUPP] = { .type = NLA_U64 }, [NL80211_TID_CONFIG_ATTR_PEER_SUPP] = { .type = NLA_U64 }, [NL80211_TID_CONFIG_ATTR_OVERRIDE] = { .type = NLA_FLAG }, [NL80211_TID_CONFIG_ATTR_TIDS] = NLA_POLICY_RANGE(NLA_U16, 1, 0xff), [NL80211_TID_CONFIG_ATTR_NOACK] = NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), [NL80211_TID_CONFIG_ATTR_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_TID_CONFIG_ATTR_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_TID_CONFIG_ATTR_AMPDU_CTRL] = NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), [NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL] = NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), [NL80211_TID_CONFIG_ATTR_AMSDU_CTRL] = NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE), [NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE] = NLA_POLICY_MAX(NLA_U8, NL80211_TX_RATE_FIXED), [NL80211_TID_CONFIG_ATTR_TX_RATE] = NLA_POLICY_NESTED(nl80211_txattr_policy), }; static const struct nla_policy nl80211_fils_discovery_policy[NL80211_FILS_DISCOVERY_ATTR_MAX + 1] = { [NL80211_FILS_DISCOVERY_ATTR_INT_MIN] = NLA_POLICY_MAX(NLA_U32, 10000), [NL80211_FILS_DISCOVERY_ATTR_INT_MAX] = NLA_POLICY_MAX(NLA_U32, 10000), [NL80211_FILS_DISCOVERY_ATTR_TMPL] = NLA_POLICY_RANGE(NLA_BINARY, NL80211_FILS_DISCOVERY_TMPL_MIN_LEN, IEEE80211_MAX_DATA_LEN), }; static const struct nla_policy nl80211_unsol_bcast_probe_resp_policy[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1] = { [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] = NLA_POLICY_MAX(NLA_U32, 20), [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN } }; static const struct nla_policy sar_specs_policy[NL80211_SAR_ATTR_SPECS_MAX + 1] = { [NL80211_SAR_ATTR_SPECS_POWER] = { .type = NLA_S32 }, [NL80211_SAR_ATTR_SPECS_RANGE_INDEX] = {.type = NLA_U32 }, }; static const struct nla_policy sar_policy[NL80211_SAR_ATTR_MAX + 1] = { [NL80211_SAR_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_SAR_TYPE), [NL80211_SAR_ATTR_SPECS] = NLA_POLICY_NESTED_ARRAY(sar_specs_policy), }; static const struct nla_policy nl80211_mbssid_config_policy[NL80211_MBSSID_CONFIG_ATTR_MAX + 1] = { [NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES] = NLA_POLICY_MIN(NLA_U8, 2), [NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_MBSSID_CONFIG_ATTR_INDEX] = { .type = NLA_U8 }, [NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX] = { .type = NLA_U32 }, [NL80211_MBSSID_CONFIG_ATTR_EMA] = { .type = NLA_FLAG }, }; static const struct nla_policy nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = { [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 }, [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 }, }; static struct netlink_range_validation nl80211_punct_bitmap_range = { .min = 0, .max = 0xffff, }; static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_EDMG_CHANNELS] = NLA_POLICY_RANGE(NLA_U8, NL80211_EDMG_CHANNELS_MIN, NL80211_EDMG_CHANNELS_MAX), [NL80211_ATTR_WIPHY_EDMG_BW_CONFIG] = NLA_POLICY_RANGE(NLA_U8, NL80211_EDMG_BW_CONFIG_MIN, NL80211_EDMG_BW_CONFIG_MAX), [NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 }, 
[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ1_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999), [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_WIPHY_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG }, [NL80211_ATTR_IFTYPE] = NLA_POLICY_MAX(NLA_U32, NL80211_IFTYPE_MAX), [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, [NL80211_ATTR_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_PREV_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 7), [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_ATTR_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES), [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_HEAD] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_BEACON_TAIL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_STA_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_STA_PLINK_ACTION] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_ACTIONS - 1), [NL80211_ATTR_STA_TX_POWER_SETTING] = NLA_POLICY_RANGE(NLA_U8, NL80211_TX_POWER_AUTOMATIC, NL80211_TX_POWER_FIXED), [NL80211_ATTR_STA_TX_POWER] = { .type = NLA_S16 }, [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_MESH_ID_LEN }, [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT, /* allow 3 for NUL-termination, we used to declare this NLA_STRING */ [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3), [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, [NL80211_ATTR_HT_CAPABILITY] = NLA_POLICY_EXACT_LEN_WARN(NL80211_HT_CAPABILITY_LEN), [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, [NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, [NL80211_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, [NL80211_ATTR_TIMED_OUT] = { .type = 
NLA_FLAG }, [NL80211_ATTR_USE_MFP] = NLA_POLICY_RANGE(NLA_U32, NL80211_MFP_NO, NL80211_MFP_OPTIONAL), [NL80211_ATTR_STA_FLAGS2] = NLA_POLICY_EXACT_LEN_WARN(sizeof(struct nl80211_sta_flag_update)), [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 }, [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, [NL80211_ATTR_PMKID] = NLA_POLICY_EXACT_LEN_WARN(WLAN_PMKID_LEN), [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, [NL80211_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, [NL80211_ATTR_PS_STATE] = NLA_POLICY_RANGE(NLA_U32, NL80211_PS_DISABLED, NL80211_PS_ENABLED), [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 }, [NL80211_ATTR_WIPHY_ANTENNA_TX] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 }, [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1), [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 }, [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG }, [NL80211_ATTR_MESH_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED }, [NL80211_ATTR_HIDDEN_SSID] = NLA_POLICY_RANGE(NLA_U32, NL80211_HIDDEN_SSID_NOT_IN_USE, NL80211_HIDDEN_SSID_ZERO_CONTENTS), [NL80211_ATTR_IE_PROBE_RESP] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_IE_ASSOC_RESP] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_STA_WME] = NLA_POLICY_NESTED(nl80211_sta_wme_policy), [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED }, [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_INITIATOR] = { .type = NLA_FLAG }, [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG }, [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 }, [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG }, [NL80211_ATTR_HT_CAPABILITY_MASK] = { .len = NL80211_HT_CAPABILITY_LEN }, [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 }, 
[NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 }, [NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 }, [NL80211_ATTR_WDEV] = { .type = NLA_U64 }, [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 }, /* need to include at least Auth Transaction and Status Code */ [NL80211_ATTR_AUTH_DATA] = NLA_POLICY_MIN_LEN(4), [NL80211_ATTR_VHT_CAPABILITY] = NLA_POLICY_EXACT_LEN_WARN(NL80211_VHT_CAPABILITY_LEN), [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127), [NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = NLA_POLICY_RANGE(NLA_U32, NL80211_MESH_POWER_UNKNOWN + 1, NL80211_MESH_POWER_MAX), [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, }, [NL80211_ATTR_SPLIT_WIPHY_DUMP] = { .type = NLA_FLAG, }, [NL80211_ATTR_DISABLE_VHT] = { .type = NLA_FLAG }, [NL80211_ATTR_VHT_CAPABILITY_MASK] = { .len = NL80211_VHT_CAPABILITY_LEN, }, [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 }, [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = NLA_POLICY_MAX(NLA_U16, NL80211_CRIT_PROTO_MAX_DURATION), [NL80211_ATTR_PEER_AID] = NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID), [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 }, [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED }, [NL80211_ATTR_CNTDWN_OFFS_BEACON] = { .type = NLA_BINARY }, [NL80211_ATTR_CNTDWN_OFFS_PRESP] = { .type = NLA_BINARY }, [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = NLA_POLICY_MIN_LEN(2), /* * The value of the Length field of the Supported Operating * Classes element is between 2 and 253. 
*/ [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = NLA_POLICY_RANGE(NLA_BINARY, 2, 253), [NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG }, [NL80211_ATTR_OPMODE_NOTIF] = { .type = NLA_U8 }, [NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, [NL80211_ATTR_QOS_MAP] = NLA_POLICY_RANGE(NLA_BINARY, IEEE80211_QOS_MAP_LEN_MIN, IEEE80211_QOS_MAP_LEN_MAX), [NL80211_ATTR_MAC_HINT] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 }, [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 }, [NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG }, [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY }, [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG }, [NL80211_ATTR_TSID] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_TIDS - 1), [NL80211_ATTR_USER_PRIO] = NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1), [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 }, [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 }, [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_MAC_MASK] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG }, [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 }, [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_SUPPORT_P2P_PS] = NLA_POLICY_MAX(NLA_U8, NUM_NL80211_P2P_PS_STATUS - 1), [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { .len = VHT_MUMIMO_GROUPS_DATA_LEN }, [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_ATTR_BANDS] = { .type = NLA_U32 }, [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED }, [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY, .len = FILS_MAX_KEK_LEN }, [NL80211_ATTR_FILS_NONCES] = NLA_POLICY_EXACT_LEN_WARN(2 * FILS_NONCE_LEN), [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, }, [NL80211_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 }, [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 }, [NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_USERNAME_LEN }, [NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_REALM_LEN }, [NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 }, [NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY, .len = FILS_ERP_MAX_RRK_LEN }, [NL80211_ATTR_FILS_CACHE_ID] = NLA_POLICY_EXACT_LEN_WARN(2), [NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN }, [NL80211_ATTR_PMKR0_NAME] = NLA_POLICY_EXACT_LEN(WLAN_PMK_NAME_LEN), [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 }, [NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 }, [NL80211_ATTR_HE_CAPABILITY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_he_capa, NL80211_HE_MAX_CAPABILITY_LEN), [NL80211_ATTR_FTM_RESPONDER] = NLA_POLICY_NESTED(nl80211_ftm_responder_policy), [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), [NL80211_ATTR_PEER_MEASUREMENTS] = NLA_POLICY_NESTED(nl80211_pmsr_attr_policy), [NL80211_ATTR_AIRTIME_WEIGHT] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_ATTR_SAE_PASSWORD] = { .type = 
NLA_BINARY, .len = SAE_PASSWORD_MAX_LEN }, [NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG }, [NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy), [NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2), [NL80211_ATTR_HE_BSS_COLOR] = NLA_POLICY_NESTED(he_bss_color_policy), [NL80211_ATTR_TID_CONFIG] = NLA_POLICY_NESTED_ARRAY(nl80211_tid_config_attr_policy), [NL80211_ATTR_CONTROL_PORT_NO_PREAUTH] = { .type = NLA_FLAG }, [NL80211_ATTR_PMK_LIFETIME] = NLA_POLICY_MIN(NLA_U32, 1), [NL80211_ATTR_PMK_REAUTH_THRESHOLD] = NLA_POLICY_RANGE(NLA_U8, 1, 100), [NL80211_ATTR_RECEIVE_MULTICAST] = { .type = NLA_FLAG }, [NL80211_ATTR_WIPHY_FREQ_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999), [NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED }, [NL80211_ATTR_HE_6GHZ_CAPABILITY] = NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)), [NL80211_ATTR_FILS_DISCOVERY] = NLA_POLICY_NESTED(nl80211_fils_discovery_policy), [NL80211_ATTR_UNSOL_BCAST_PROBE_RESP] = NLA_POLICY_NESTED(nl80211_unsol_bcast_probe_resp_policy), [NL80211_ATTR_S1G_CAPABILITY] = NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN), [NL80211_ATTR_S1G_CAPABILITY_MASK] = NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN), [NL80211_ATTR_SAE_PWE] = NLA_POLICY_RANGE(NLA_U8, NL80211_SAE_PWE_HUNT_AND_PECK, NL80211_SAE_PWE_BOTH), [NL80211_ATTR_RECONNECT_REQUESTED] = { .type = NLA_REJECT }, [NL80211_ATTR_SAR_SPEC] = NLA_POLICY_NESTED(sar_policy), [NL80211_ATTR_DISABLE_HE] = { .type = NLA_FLAG }, [NL80211_ATTR_OBSS_COLOR_BITMAP] = { .type = NLA_U64 }, [NL80211_ATTR_COLOR_CHANGE_COUNT] = { .type = NLA_U8 }, [NL80211_ATTR_COLOR_CHANGE_COLOR] = { .type = NLA_U8 }, [NL80211_ATTR_COLOR_CHANGE_ELEMS] = NLA_POLICY_NESTED(nl80211_policy), [NL80211_ATTR_MBSSID_CONFIG] = NLA_POLICY_NESTED(nl80211_mbssid_config_policy), [NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED }, [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_EHT_CAPABILITY] = NLA_POLICY_RANGE(NLA_BINARY, NL80211_EHT_MIN_CAPABILITY_LEN, NL80211_EHT_MAX_CAPABILITY_LEN), [NL80211_ATTR_DISABLE_EHT] = { .type = NLA_FLAG }, [NL80211_ATTR_MLO_LINKS] = NLA_POLICY_NESTED_ARRAY(nl80211_policy), [NL80211_ATTR_MLO_LINK_ID] = NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS), [NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN), [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT }, [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range), [NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 }, [NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG }, [NL80211_ATTR_EMA_RNR_ELEMS] = { .type = NLA_NESTED }, [NL80211_ATTR_MLO_LINK_DISABLED] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, [NL80211_KEY_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_KEYTYPES - 1), [NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, [NL80211_KEY_MODE] = NLA_POLICY_RANGE(NLA_U8, 0, NL80211_KEY_SET_TX), }; /* policy for the key default flags */ static const struct nla_policy nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = { 
[NL80211_KEY_DEFAULT_TYPE_UNICAST] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG }, }; #ifdef CONFIG_PM /* policy for WoWLAN attributes */ static const struct nla_policy nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = { [NL80211_WOWLAN_TRIG_ANY] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED }, [NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_TCP_CONNECTION] = { .type = NLA_NESTED }, [NL80211_WOWLAN_TRIG_NET_DETECT] = { .type = NLA_NESTED }, }; static const struct nla_policy nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = { [NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_DST_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1), [NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = { .len = sizeof(struct nl80211_wowlan_tcp_data_seq) }, [NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN] = { .len = sizeof(struct nl80211_wowlan_tcp_data_token) }, [NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 }, [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1), [NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1), }; #endif /* CONFIG_PM */ /* policy for coalesce rule attributes */ static const struct nla_policy nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_COALESCE_RULE_CONDITION] = NLA_POLICY_RANGE(NLA_U32, NL80211_COALESCE_CONDITION_MATCH, NL80211_COALESCE_CONDITION_NO_MATCH), [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED }, }; /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY, .len = NL80211_KEK_EXT_LEN }, [NL80211_REKEY_DATA_KCK] = { .type = NLA_BINARY, .len = NL80211_KCK_EXT_LEN_32 }, [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN(NL80211_REPLAY_CTR_LEN), [NL80211_REKEY_DATA_AKM] = { .type = NLA_U32 }, }; static const struct nla_policy nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = { [NL80211_BAND_2GHZ] = { .type = NLA_S32 }, [NL80211_BAND_5GHZ] = { .type = NLA_S32 }, [NL80211_BAND_6GHZ] = { .type = NLA_S32 }, [NL80211_BAND_60GHZ] = { .type = NLA_S32 }, [NL80211_BAND_LC] = { .type = NLA_S32 }, }; static const struct nla_policy nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = { [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 }, [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] = NLA_POLICY_NESTED(nl80211_match_band_rssi_policy), }; static const struct nla_policy nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = { [NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 }, }; static const struct nla_policy nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { [NL80211_BSS_SELECT_ATTR_RSSI] = { 
.type = NLA_FLAG }, [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 }, [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = { .len = sizeof(struct nl80211_bss_select_rssi_adjust) }, }; /* policy for NAN function attributes */ static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_TYPE] = NLA_POLICY_MAX(NLA_U8, NL80211_NAN_FUNC_MAX_TYPE), [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN), [NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG }, [NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 }, [NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY, .len = NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN }, [NL80211_NAN_FUNC_SRF] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_RX_MATCH_FILTER] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_TX_MATCH_FILTER] = { .type = NLA_NESTED }, [NL80211_NAN_FUNC_INSTANCE_ID] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_TERM_REASON] = { .type = NLA_U8 }, }; /* policy for Service Response Filter attributes */ static const struct nla_policy nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = { [NL80211_NAN_SRF_INCLUDE] = { .type = NLA_FLAG }, [NL80211_NAN_SRF_BF] = { .type = NLA_BINARY, .len = NL80211_NAN_FUNC_SRF_MAX_LEN }, [NL80211_NAN_SRF_BF_IDX] = { .type = NLA_U8 }, [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, }; /* policy for packet pattern attributes */ static const struct nla_policy nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, }, [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, }, [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, }; static int nl80211_prepare_wdev_dump(struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev, struct nlattr **attrbuf) { int err; if (!cb->args[0]) { struct nlattr **attrbuf_free = NULL; if (!attrbuf) { attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); if (!attrbuf) return -ENOMEM; attrbuf_free = attrbuf; } err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) { kfree(attrbuf_free); return err; } rtnl_lock(); *wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(cb->skb->sk), attrbuf); kfree(attrbuf_free); if (IS_ERR(*wdev)) { rtnl_unlock(); return PTR_ERR(*wdev); } *rdev = wiphy_to_rdev((*wdev)->wiphy); mutex_lock(&(*rdev)->wiphy.mtx); rtnl_unlock(); /* 0 is the first index - add 1 to parse only once */ cb->args[0] = (*rdev)->wiphy_idx + 1; cb->args[1] = (*wdev)->identifier; } else { /* subtract the 1 again here */ struct wiphy *wiphy; struct wireless_dev *tmp; rtnl_lock(); wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); if (!wiphy) { rtnl_unlock(); return -ENODEV; } *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; } } if (!*wdev) { rtnl_unlock(); return -ENODEV; } mutex_lock(&(*rdev)->wiphy.mtx); rtnl_unlock(); } return 0; } /* message building helper */ void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return 
genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd); } static int nl80211_msg_put_wmm_rules(struct sk_buff *msg, const struct ieee80211_reg_rule *rule) { int j; struct nlattr *nl_wmm_rules = nla_nest_start_noflag(msg, NL80211_FREQUENCY_ATTR_WMM); if (!nl_wmm_rules) goto nla_put_failure; for (j = 0; j < IEEE80211_NUM_ACS; j++) { struct nlattr *nl_wmm_rule = nla_nest_start_noflag(msg, j); if (!nl_wmm_rule) goto nla_put_failure; if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, rule->wmm_rule.client[j].cw_min) || nla_put_u16(msg, NL80211_WMMR_CW_MAX, rule->wmm_rule.client[j].cw_max) || nla_put_u8(msg, NL80211_WMMR_AIFSN, rule->wmm_rule.client[j].aifsn) || nla_put_u16(msg, NL80211_WMMR_TXOP, rule->wmm_rule.client[j].cot)) goto nla_put_failure; nla_nest_end(msg, nl_wmm_rule); } nla_nest_end(msg, nl_wmm_rules); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, struct ieee80211_channel *chan, bool large) { /* Some channels must be completely excluded from the * list to protect old user-space tools from breaking */ if (!large && chan->flags & (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ)) return 0; if (!large && chan->freq_offset) return 0; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, chan->center_freq)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_OFFSET, chan->freq_offset)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_DISABLED) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED)) goto nla_put_failure; if (chan->flags & IEEE80211_CHAN_NO_IR) { if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IR)) goto nla_put_failure; if (nla_put_flag(msg, __NL80211_FREQUENCY_ATTR_NO_IBSS)) goto nla_put_failure; } if (chan->flags & IEEE80211_CHAN_RADAR) { if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) goto nla_put_failure; if (large) { u32 time; time = elapsed_jiffies_msecs(chan->dfs_state_entered); if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE, chan->dfs_state)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, chan->dfs_cac_ms)) goto nla_put_failure; } } if (large) { if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_IR_CONCURRENT) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_IR_CONCURRENT)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_HE) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_1MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_1MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_2MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_2MHZ)) goto 
nla_put_failure; if ((chan->flags & IEEE80211_CHAN_4MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_4MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_8MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_8MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_16MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_320MHZ) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_320MHZ)) goto nla_put_failure; if ((chan->flags & IEEE80211_CHAN_NO_EHT) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_EHT)) goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power))) goto nla_put_failure; if (large) { const struct ieee80211_reg_rule *rule = freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq)); if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { if (nl80211_msg_put_wmm_rules(msg, rule)) goto nla_put_failure; } } return 0; nla_put_failure: return -ENOBUFS; } static bool nl80211_put_txq_stats(struct sk_buff *msg, struct cfg80211_txq_stats *txqstats, int attrtype) { struct nlattr *txqattr; #define PUT_TXQVAL_U32(attr, memb) do { \ if (txqstats->filled & BIT(NL80211_TXQ_STATS_ ## attr) && \ nla_put_u32(msg, NL80211_TXQ_STATS_ ## attr, txqstats->memb)) \ return false; \ } while (0) txqattr = nla_nest_start_noflag(msg, attrtype); if (!txqattr) return false; PUT_TXQVAL_U32(BACKLOG_BYTES, backlog_bytes); PUT_TXQVAL_U32(BACKLOG_PACKETS, backlog_packets); PUT_TXQVAL_U32(FLOWS, flows); PUT_TXQVAL_U32(DROPS, drops); PUT_TXQVAL_U32(ECN_MARKS, ecn_marks); PUT_TXQVAL_U32(OVERLIMIT, overlimit); PUT_TXQVAL_U32(OVERMEMORY, overmemory); PUT_TXQVAL_U32(COLLISIONS, collisions); PUT_TXQVAL_U32(TX_BYTES, tx_bytes); PUT_TXQVAL_U32(TX_PACKETS, tx_packets); PUT_TXQVAL_U32(MAX_FLOWS, max_flows); nla_nest_end(msg, txqattr); #undef PUT_TXQVAL_U32 return true; } /* netlink command implementations */ /** * nl80211_link_id - return link ID * @attrs: attributes to look at * * Returns: the link ID or 0 if not given * * Note this function doesn't do any validation of the link * ID validity wrt. links that were actually added, so it must * be called only from ops with %NL80211_FLAG_MLO_VALID_LINK_ID * or if additional validation is done. 
*/ static unsigned int nl80211_link_id(struct nlattr **attrs) { struct nlattr *linkid = attrs[NL80211_ATTR_MLO_LINK_ID]; if (!linkid) return 0; return nla_get_u8(linkid); } static int nl80211_link_id_or_invalid(struct nlattr **attrs) { struct nlattr *linkid = attrs[NL80211_ATTR_MLO_LINK_ID]; if (!linkid) return -1; return nla_get_u8(linkid); } struct key_parse { struct key_params p; int idx; int type; bool def, defmgmt, defbeacon; bool def_uni, def_multi; }; static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key, struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested_deprecated(tb, NL80211_KEY_MAX, key, nl80211_key_policy, info->extack); if (err) return err; k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; k->defbeacon = !!tb[NL80211_KEY_DEFAULT_BEACON]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt || k->defbeacon) k->def_multi = true; if (tb[NL80211_KEY_IDX]) k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); if (tb[NL80211_KEY_DATA]) { k->p.key = nla_data(tb[NL80211_KEY_DATA]); k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); } if (tb[NL80211_KEY_SEQ]) { k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); } if (tb[NL80211_KEY_CIPHER]) k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); if (tb[NL80211_KEY_TYPE]) k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; err = nla_parse_nested_deprecated(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], nl80211_key_default_policy, info->extack); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } if (tb[NL80211_KEY_MODE]) k->p.mode = nla_get_u8(tb[NL80211_KEY_MODE]); return 0; } static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) { if (info->attrs[NL80211_ATTR_KEY_DATA]) { k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); } if (info->attrs[NL80211_ATTR_KEY_SEQ]) { k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); } if (info->attrs[NL80211_ATTR_KEY_IDX]) k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (info->attrs[NL80211_ATTR_KEY_CIPHER]) k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT]; k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt) k->def_multi = true; if (info->attrs[NL80211_ATTR_KEY_TYPE]) k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; int err = nla_parse_nested_deprecated(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES], nl80211_key_default_policy, info->extack); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } return 0; } static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) { int err; memset(k, 0, sizeof(*k)); k->idx = -1; k->type = -1; if (info->attrs[NL80211_ATTR_KEY]) err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k); else err = nl80211_parse_key_old(info, k); if (err) return err; if ((k->def ? 1 : 0) + (k->defmgmt ? 1 : 0) + (k->defbeacon ? 
1 : 0) > 1) { GENL_SET_ERR_MSG(info, "key with multiple default flags is invalid"); return -EINVAL; } if (k->defmgmt || k->defbeacon) { if (k->def_uni || !k->def_multi) { GENL_SET_ERR_MSG(info, "defmgmt/defbeacon key must be mcast"); return -EINVAL; } } if (k->idx != -1) { if (k->defmgmt) { if (k->idx < 4 || k->idx > 5) { GENL_SET_ERR_MSG(info, "defmgmt key idx not 4 or 5"); return -EINVAL; } } else if (k->defbeacon) { if (k->idx < 6 || k->idx > 7) { GENL_SET_ERR_MSG(info, "defbeacon key idx not 6 or 7"); return -EINVAL; } } else if (k->def) { if (k->idx < 0 || k->idx > 3) { GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; } } else { if (k->idx < 0 || k->idx > 7) { GENL_SET_ERR_MSG(info, "key idx not 0-7"); return -EINVAL; } } } return 0; } static struct cfg80211_cached_keys * nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, struct genl_info *info, bool *no_ht) { struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS]; struct key_parse parse; struct nlattr *key; struct cfg80211_cached_keys *result; int rem, err, def = 0; bool have_key = false; nla_for_each_nested(key, keys, rem) { have_key = true; break; } if (!have_key) return NULL; result = kzalloc(sizeof(*result), GFP_KERNEL); if (!result) return ERR_PTR(-ENOMEM); result->def = -1; nla_for_each_nested(key, keys, rem) { memset(&parse, 0, sizeof(parse)); parse.idx = -1; err = nl80211_parse_key_new(info, key, &parse); if (err) goto error; err = -EINVAL; if (!parse.p.key) goto error; if (parse.idx < 0 || parse.idx > 3) { GENL_SET_ERR_MSG(info, "key index out of range [0-3]"); goto error; } if (parse.def) { if (def) { GENL_SET_ERR_MSG(info, "only one key can be default"); goto error; } def = 1; result->def = parse.idx; if (!parse.def_uni || !parse.def_multi) goto error; } else if (parse.defmgmt) goto error; err = cfg80211_validate_key_settings(rdev, &parse.p, parse.idx, false, NULL); if (err) goto error; if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 && parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) { GENL_SET_ERR_MSG(info, "connect key must be WEP"); err = -EINVAL; goto error; } result->params[parse.idx].cipher = parse.p.cipher; result->params[parse.idx].key_len = parse.p.key_len; result->params[parse.idx].key = result->data[parse.idx]; memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); /* must be WEP key if we got here */ if (no_ht) *no_ht = true; } if (result->def < 0) { err = -EINVAL; GENL_SET_ERR_MSG(info, "need a default/TX key"); goto error; } return result; error: kfree(result); return ERR_PTR(err); } static int nl80211_key_allowed(struct wireless_dev *wdev) { ASSERT_WDEV_LOCK(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: if (wdev->u.ibss.current_bss) return 0; return -ENOLINK; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->connected) return 0; return -ENOLINK; case NL80211_IFTYPE_NAN: if (wiphy_ext_feature_isset(wdev->wiphy, NL80211_EXT_FEATURE_SECURE_NAN)) return 0; return -EINVAL; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_WDS: case NUM_NL80211_IFTYPES: return -EINVAL; } return 0; } static struct ieee80211_channel *nl80211_get_valid_chan(struct wiphy *wiphy, u32 freq) { struct ieee80211_channel *chan; chan = ieee80211_get_channel_khz(wiphy, freq); if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) return NULL; return chan; } static int 
nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes) { struct nlattr *nl_modes = nla_nest_start_noflag(msg, attr); int i; if (!nl_modes) goto nla_put_failure; i = 0; while (ifmodes) { if ((ifmodes & 1) && nla_put_flag(msg, i)) goto nla_put_failure; ifmodes >>= 1; i++; } nla_nest_end(msg, nl_modes); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_put_iface_combinations(struct wiphy *wiphy, struct sk_buff *msg, bool large) { struct nlattr *nl_combis; int i, j; nl_combis = nla_nest_start_noflag(msg, NL80211_ATTR_INTERFACE_COMBINATIONS); if (!nl_combis) goto nla_put_failure; for (i = 0; i < wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct nlattr *nl_combi, *nl_limits; c = &wiphy->iface_combinations[i]; nl_combi = nla_nest_start_noflag(msg, i + 1); if (!nl_combi) goto nla_put_failure; nl_limits = nla_nest_start_noflag(msg, NL80211_IFACE_COMB_LIMITS); if (!nl_limits) goto nla_put_failure; for (j = 0; j < c->n_limits; j++) { struct nlattr *nl_limit; nl_limit = nla_nest_start_noflag(msg, j + 1); if (!nl_limit) goto nla_put_failure; if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[j].max)) goto nla_put_failure; if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, c->limits[j].types)) goto nla_put_failure; nla_nest_end(msg, nl_limit); } nla_nest_end(msg, nl_limits); if (c->beacon_int_infra_match && nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, c->num_different_channels) || nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, c->max_interfaces)) goto nla_put_failure; if (large && (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, c->radar_detect_widths) || nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, c->radar_detect_regions))) goto nla_put_failure; if (c->beacon_int_min_gcd && nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, c->beacon_int_min_gcd)) goto nla_put_failure; nla_nest_end(msg, nl_combi); } nla_nest_end(msg, nl_combis); return 0; nla_put_failure: return -ENOBUFS; } #ifdef CONFIG_PM static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp; struct nlattr *nl_tcp; if (!tcp) return 0; nl_tcp = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); if (!nl_tcp) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, tcp->data_payload_max)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, tcp->data_payload_max)) return -ENOBUFS; if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ)) return -ENOBUFS; if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, sizeof(*tcp->tok), tcp->tok)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, tcp->data_interval_max)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, tcp->wake_payload_max)) return -ENOBUFS; nla_nest_end(msg, nl_tcp); return 0; } static int nl80211_send_wowlan(struct sk_buff *msg, struct cfg80211_registered_device *rdev, bool large) { struct nlattr *nl_wowlan; if (!rdev->wiphy.wowlan) return 0; nl_wowlan = nla_nest_start_noflag(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); if (!nl_wowlan) return -ENOBUFS; if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || ((rdev->wiphy.wowlan->flags 
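/*
 * Illustrative note, not part of nl80211.c itself: the WoWLAN capability
 * advertisement here is a flag-for-flag translation -- each WIPHY_WOWLAN_*
 * bit the driver set becomes a zero-length NL80211_WOWLAN_TRIG_* flag
 * attribute, and any nla_put failure propagates as -ENOBUFS so the caller
 * can cancel the message.  The generic shape, with hypothetical names
 * standing in for a capability bit and its attribute:
 *
 *   if ((wiphy_flags & HYPOTHETICAL_CAP_BIT) &&
 *       nla_put_flag(msg, HYPOTHETICAL_CAP_ATTR))
 *           return -ENOBUFS;
 */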
& WIPHY_WOWLAN_MAGIC_PKT) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) return -ENOBUFS; if (rdev->wiphy.wowlan->n_patterns) { struct nl80211_pattern_support pat = { .max_patterns = rdev->wiphy.wowlan->n_patterns, .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len, .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len, .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset, }; if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, sizeof(pat), &pat)) return -ENOBUFS; } if ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_NET_DETECT) && nla_put_u32(msg, NL80211_WOWLAN_TRIG_NET_DETECT, rdev->wiphy.wowlan->max_nd_match_sets)) return -ENOBUFS; if (large && nl80211_send_wowlan_tcp_caps(rdev, msg)) return -ENOBUFS; nla_nest_end(msg, nl_wowlan); return 0; } #endif static int nl80211_send_coalesce(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct nl80211_coalesce_rule_support rule; if (!rdev->wiphy.coalesce) return 0; rule.max_rules = rdev->wiphy.coalesce->n_rules; rule.max_delay = rdev->wiphy.coalesce->max_delay; rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns; rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len; rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len; rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset; if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule)) return -ENOBUFS; return 0; } static int nl80211_send_iftype_data(struct sk_buff *msg, const struct ieee80211_supported_band *sband, const struct ieee80211_sband_iftype_data *iftdata) { const struct ieee80211_sta_he_cap *he_cap = &iftdata->he_cap; const struct ieee80211_sta_eht_cap *eht_cap = &iftdata->eht_cap; if (nl80211_put_iftypes(msg, NL80211_BAND_IFTYPE_ATTR_IFTYPES, iftdata->types_mask)) return -ENOBUFS; if (he_cap->has_he) { if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC, sizeof(he_cap->he_cap_elem.mac_cap_info), he_cap->he_cap_elem.mac_cap_info) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, sizeof(he_cap->he_cap_elem.phy_cap_info), he_cap->he_cap_elem.phy_cap_info) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, sizeof(he_cap->he_mcs_nss_supp), &he_cap->he_mcs_nss_supp) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, sizeof(he_cap->ppe_thres), he_cap->ppe_thres)) return -ENOBUFS; } if (eht_cap->has_eht && he_cap->has_he) { u8 mcs_nss_size, ppe_thresh_size; u16 ppe_thres_hdr; bool is_ap; is_ap = iftdata->types_mask & BIT(NL80211_IFTYPE_AP) || iftdata->types_mask & BIT(NL80211_IFTYPE_P2P_GO); mcs_nss_size = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, &eht_cap->eht_cap_elem, is_ap); ppe_thres_hdr = get_unaligned_le16(&eht_cap->eht_ppe_thres[0]); ppe_thresh_size = ieee80211_eht_ppe_size(ppe_thres_hdr, eht_cap->eht_cap_elem.phy_cap_info); if (nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC, sizeof(eht_cap->eht_cap_elem.mac_cap_info), eht_cap->eht_cap_elem.mac_cap_info) || nla_put(msg, 
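/*
 * Illustrative note, not part of nl80211.c itself: in the EHT branch below,
 * mcs_nss_size and ppe_thresh_size are computed before the nla_put() calls
 * because the EHT MCS/NSS set and the PPE thresholds field are variable
 * length -- their size depends on the HE/EHT PHY capability bits and on
 * whether the advertised interface types include AP/P2P_GO, which is what
 * ieee80211_eht_mcs_nss_size() and ieee80211_eht_ppe_size() encode.
 */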
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY, sizeof(eht_cap->eht_cap_elem.phy_cap_info), eht_cap->eht_cap_elem.phy_cap_info) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET, mcs_nss_size, &eht_cap->eht_mcs_nss_supp) || nla_put(msg, NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE, ppe_thresh_size, eht_cap->eht_ppe_thres)) return -ENOBUFS; } if (sband->band == NL80211_BAND_6GHZ && nla_put(msg, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, sizeof(iftdata->he_6ghz_capa), &iftdata->he_6ghz_capa)) return -ENOBUFS; if (iftdata->vendor_elems.data && iftdata->vendor_elems.len && nla_put(msg, NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS, iftdata->vendor_elems.len, iftdata->vendor_elems.data)) return -ENOBUFS; return 0; } static int nl80211_send_band_rateinfo(struct sk_buff *msg, struct ieee80211_supported_band *sband, bool large) { struct nlattr *nl_rates, *nl_rate; struct ieee80211_rate *rate; int i; /* add HT info */ if (sband->ht_cap.ht_supported && (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, sizeof(sband->ht_cap.mcs), &sband->ht_cap.mcs) || nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, sband->ht_cap.cap) || nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, sband->ht_cap.ampdu_factor) || nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, sband->ht_cap.ampdu_density))) return -ENOBUFS; /* add VHT info */ if (sband->vht_cap.vht_supported && (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET, sizeof(sband->vht_cap.vht_mcs), &sband->vht_cap.vht_mcs) || nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA, sband->vht_cap.cap))) return -ENOBUFS; if (large && sband->n_iftype_data) { struct nlattr *nl_iftype_data = nla_nest_start_noflag(msg, NL80211_BAND_ATTR_IFTYPE_DATA); int err; if (!nl_iftype_data) return -ENOBUFS; for (i = 0; i < sband->n_iftype_data; i++) { struct nlattr *iftdata; iftdata = nla_nest_start_noflag(msg, i + 1); if (!iftdata) return -ENOBUFS; err = nl80211_send_iftype_data(msg, sband, &sband->iftype_data[i]); if (err) return err; nla_nest_end(msg, iftdata); } nla_nest_end(msg, nl_iftype_data); } /* add EDMG info */ if (large && sband->edmg_cap.channels && (nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS, sband->edmg_cap.channels) || nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG, sband->edmg_cap.bw_config))) return -ENOBUFS; /* add bitrates */ nl_rates = nla_nest_start_noflag(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) return -ENOBUFS; for (i = 0; i < sband->n_bitrates; i++) { nl_rate = nla_nest_start_noflag(msg, i); if (!nl_rate) return -ENOBUFS; rate = &sband->bitrates[i]; if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, rate->bitrate)) return -ENOBUFS; if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) && nla_put_flag(msg, NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE)) return -ENOBUFS; nla_nest_end(msg, nl_rate); } nla_nest_end(msg, nl_rates); /* S1G capabilities */ if (sband->band == NL80211_BAND_S1GHZ && sband->s1g_cap.s1g && (nla_put(msg, NL80211_BAND_ATTR_S1G_CAPA, sizeof(sband->s1g_cap.cap), sband->s1g_cap.cap) || nla_put(msg, NL80211_BAND_ATTR_S1G_MCS_NSS_SET, sizeof(sband->s1g_cap.nss_mcs), sband->s1g_cap.nss_mcs))) return -ENOBUFS; return 0; } static int nl80211_send_mgmt_stypes(struct sk_buff *msg, const struct ieee80211_txrx_stypes *mgmt_stypes) { u16 stypes; struct nlattr *nl_ftypes, *nl_ifs; enum nl80211_iftype ift; int i; if (!mgmt_stypes) return 0; nl_ifs = nla_nest_start_noflag(msg, NL80211_ATTR_TX_FRAME_TYPES); if (!nl_ifs) return -ENOBUFS; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start_noflag(msg, ift); if (!nl_ftypes) return -ENOBUFS; i = 0; stypes = mgmt_stypes[ift].tx; while 
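/*
 * Illustrative note, not part of nl80211.c itself: the loop below walks a
 * u16 bitmask of supported management-frame subtypes; bit i is reported as
 * frame type (i << 4) | IEEE80211_FTYPE_MGMT, i.e. the subtype field of the
 * 802.11 frame-control word.  The same bit-walk idiom is used by
 * nl80211_put_iftypes() above.  In isolation, with a hypothetical report()
 * helper:
 *
 *   u16 mask = ...;
 *   int bit = 0;
 *
 *   while (mask) {
 *           if (mask & 1)
 *                   report(bit);
 *           mask >>= 1;
 *           bit++;
 *   }
 */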
(stypes) { if ((stypes & 1) && nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT)) return -ENOBUFS; stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); nl_ifs = nla_nest_start_noflag(msg, NL80211_ATTR_RX_FRAME_TYPES); if (!nl_ifs) return -ENOBUFS; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start_noflag(msg, ift); if (!nl_ftypes) return -ENOBUFS; i = 0; stypes = mgmt_stypes[ift].rx; while (stypes) { if ((stypes & 1) && nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT)) return -ENOBUFS; stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); return 0; } #define CMD(op, n) \ do { \ if (rdev->ops->op) { \ i++; \ if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ goto nla_put_failure; \ } \ } while (0) static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { int i = 0; /* * do *NOT* add anything into this function, new things need to be * advertised only to new versions of userspace that can deal with * the split (and they can't possibly care about new features... */ CMD(add_virtual_intf, NEW_INTERFACE); CMD(change_virtual_intf, SET_INTERFACE); CMD(add_key, NEW_KEY); CMD(start_ap, START_AP); CMD(add_station, NEW_STATION); CMD(add_mpath, NEW_MPATH); CMD(update_mesh_config, SET_MESH_CONFIG); CMD(change_bss, SET_BSS); CMD(auth, AUTHENTICATE); CMD(assoc, ASSOCIATE); CMD(deauth, DEAUTHENTICATE); CMD(disassoc, DISASSOCIATE); CMD(join_ibss, JOIN_IBSS); CMD(join_mesh, JOIN_MESH); CMD(set_pmksa, SET_PMKSA); CMD(del_pmksa, DEL_PMKSA); CMD(flush_pmksa, FLUSH_PMKSA); if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) CMD(remain_on_channel, REMAIN_ON_CHANNEL); CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); CMD(mgmt_tx, FRAME); CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { i++; if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS)) goto nla_put_failure; } if (rdev->ops->set_monitor_channel || rdev->ops->start_ap || rdev->ops->join_mesh) { i++; if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) goto nla_put_failure; } if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { CMD(tdls_mgmt, TDLS_MGMT); CMD(tdls_oper, TDLS_OPER); } if (rdev->wiphy.max_sched_scan_reqs) CMD(sched_scan_start, START_SCHED_SCAN); CMD(probe_client, PROBE_CLIENT); CMD(set_noack_map, SET_NOACK_MAP); if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { i++; if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) goto nla_put_failure; } CMD(start_p2p_device, START_P2P_DEVICE); CMD(set_mcast_rate, SET_MCAST_RATE); #ifdef CONFIG_NL80211_TESTMODE CMD(testmode_cmd, TESTMODE); #endif if (rdev->ops->connect || rdev->ops->auth) { i++; if (nla_put_u32(msg, i, NL80211_CMD_CONNECT)) goto nla_put_failure; } if (rdev->ops->disconnect || rdev->ops->deauth) { i++; if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) goto nla_put_failure; } return i; nla_put_failure: return -ENOBUFS; } static int nl80211_send_pmsr_ftm_capa(const struct cfg80211_pmsr_capabilities *cap, struct sk_buff *msg) { struct nlattr *ftm; if (!cap->ftm.supported) return 0; ftm = nla_nest_start_noflag(msg, NL80211_PMSR_TYPE_FTM); if (!ftm) return -ENOBUFS; if (cap->ftm.asap && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_ASAP)) return -ENOBUFS; if (cap->ftm.non_asap && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP)) return -ENOBUFS; if (cap->ftm.request_lci && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI)) return -ENOBUFS; if (cap->ftm.request_civicloc && 
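/*
 * Illustrative note, not part of nl80211.c itself: CMD(op, n) above expands,
 * for example CMD(add_key, NEW_KEY), to roughly:
 *
 *   if (rdev->ops->add_key) {
 *           i++;
 *           if (nla_put_u32(msg, i, NL80211_CMD_NEW_KEY))
 *                   goto nla_put_failure;
 *   }
 *
 * i.e. a command is advertised only when the driver implements the
 * corresponding op, with the running counter i used as the attribute type
 * inside the NL80211_ATTR_SUPPORTED_COMMANDS nest.  As the comment in
 * nl80211_add_commands_unsplit() says, that list is frozen; new commands
 * are only advertised via the split-dump path.
 */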
nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES, cap->ftm.preambles)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS, cap->ftm.bandwidths)) return -ENOBUFS; if (cap->ftm.max_bursts_exponent >= 0 && nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT, cap->ftm.max_bursts_exponent)) return -ENOBUFS; if (cap->ftm.max_ftms_per_burst && nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST, cap->ftm.max_ftms_per_burst)) return -ENOBUFS; if (cap->ftm.trigger_based && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED)) return -ENOBUFS; if (cap->ftm.non_trigger_based && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED)) return -ENOBUFS; nla_nest_end(msg, ftm); return 0; } static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { const struct cfg80211_pmsr_capabilities *cap = rdev->wiphy.pmsr_capa; struct nlattr *pmsr, *caps; if (!cap) return 0; /* * we don't need to clean up anything here since the caller * will genlmsg_cancel() if we fail */ pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS); if (!pmsr) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_ATTR_MAX_PEERS, cap->max_peers)) return -ENOBUFS; if (cap->report_ap_tsf && nla_put_flag(msg, NL80211_PMSR_ATTR_REPORT_AP_TSF)) return -ENOBUFS; if (cap->randomize_mac_addr && nla_put_flag(msg, NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR)) return -ENOBUFS; caps = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_TYPE_CAPA); if (!caps) return -ENOBUFS; if (nl80211_send_pmsr_ftm_capa(cap, msg)) return -ENOBUFS; nla_nest_end(msg, caps); nla_nest_end(msg, pmsr); return 0; } static int nl80211_put_iftype_akm_suites(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { int i; struct nlattr *nested, *nested_akms; const struct wiphy_iftype_akm_suites *iftype_akms; if (!rdev->wiphy.num_iftype_akm_suites || !rdev->wiphy.iftype_akm_suites) return 0; nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_AKM_SUITES); if (!nested) return -ENOBUFS; for (i = 0; i < rdev->wiphy.num_iftype_akm_suites; i++) { nested_akms = nla_nest_start(msg, i + 1); if (!nested_akms) return -ENOBUFS; iftype_akms = &rdev->wiphy.iftype_akm_suites[i]; if (nl80211_put_iftypes(msg, NL80211_IFTYPE_AKM_ATTR_IFTYPES, iftype_akms->iftypes_mask)) return -ENOBUFS; if (nla_put(msg, NL80211_IFTYPE_AKM_ATTR_SUITES, sizeof(u32) * iftype_akms->n_akm_suites, iftype_akms->akm_suites)) { return -ENOBUFS; } nla_nest_end(msg, nested_akms); } nla_nest_end(msg, nested); return 0; } static int nl80211_put_tid_config_support(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { struct nlattr *supp; if (!rdev->wiphy.tid_config_support.vif && !rdev->wiphy.tid_config_support.peer) return 0; supp = nla_nest_start(msg, NL80211_ATTR_TID_CONFIG); if (!supp) return -ENOSPC; if (rdev->wiphy.tid_config_support.vif && nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_VIF_SUPP, rdev->wiphy.tid_config_support.vif, NL80211_TID_CONFIG_ATTR_PAD)) goto fail; if (rdev->wiphy.tid_config_support.peer && nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_PEER_SUPP, rdev->wiphy.tid_config_support.peer, NL80211_TID_CONFIG_ATTR_PAD)) goto fail; /* for now we just use the same value ... 
makes more sense */ if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_SHORT, rdev->wiphy.tid_config_support.max_retry)) goto fail; if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_LONG, rdev->wiphy.tid_config_support.max_retry)) goto fail; nla_nest_end(msg, supp); return 0; fail: nla_nest_cancel(msg, supp); return -ENOBUFS; } static int nl80211_put_sar_specs(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { struct nlattr *sar_capa, *specs, *sub_freq_range; u8 num_freq_ranges; int i; if (!rdev->wiphy.sar_capa) return 0; num_freq_ranges = rdev->wiphy.sar_capa->num_freq_ranges; sar_capa = nla_nest_start(msg, NL80211_ATTR_SAR_SPEC); if (!sar_capa) return -ENOSPC; if (nla_put_u32(msg, NL80211_SAR_ATTR_TYPE, rdev->wiphy.sar_capa->type)) goto fail; specs = nla_nest_start(msg, NL80211_SAR_ATTR_SPECS); if (!specs) goto fail; /* report supported freq_ranges */ for (i = 0; i < num_freq_ranges; i++) { sub_freq_range = nla_nest_start(msg, i + 1); if (!sub_freq_range) goto fail; if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_START_FREQ, rdev->wiphy.sar_capa->freq_ranges[i].start_freq)) goto fail; if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_END_FREQ, rdev->wiphy.sar_capa->freq_ranges[i].end_freq)) goto fail; nla_nest_end(msg, sub_freq_range); } nla_nest_end(msg, specs); nla_nest_end(msg, sar_capa); return 0; fail: nla_nest_cancel(msg, sar_capa); return -ENOBUFS; } static int nl80211_put_mbssid_support(struct wiphy *wiphy, struct sk_buff *msg) { struct nlattr *config; if (!wiphy->mbssid_max_interfaces) return 0; config = nla_nest_start(msg, NL80211_ATTR_MBSSID_CONFIG); if (!config) return -ENOBUFS; if (nla_put_u8(msg, NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES, wiphy->mbssid_max_interfaces)) goto fail; if (wiphy->ema_max_profile_periodicity && nla_put_u8(msg, NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY, wiphy->ema_max_profile_periodicity)) goto fail; nla_nest_end(msg, config); return 0; fail: nla_nest_cancel(msg, config); return -ENOBUFS; } struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; long split_start, band_start, chan_start, capa_start; bool split; }; static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, int flags, struct nl80211_dump_wiphy_state *state) { void *hdr; struct nlattr *nl_bands, *nl_band; struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_cmds; enum nl80211_band band; struct ieee80211_channel *chan; int i; const struct ieee80211_txrx_stypes *mgmt_stypes = rdev->wiphy.mgmt_stypes; u32 features; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -ENOBUFS; if (WARN_ON(!state)) return -EINVAL; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&rdev->wiphy)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, cfg80211_rdev_list_generation)) goto nla_put_failure; if (cmd != NL80211_CMD_NEW_WIPHY) goto finish; switch (state->split_start) { case 0: if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, rdev->wiphy.retry_short) || nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, rdev->wiphy.retry_long) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, rdev->wiphy.frag_threshold) || nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, rdev->wiphy.rts_threshold) || nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, rdev->wiphy.coverage_class) || nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, rdev->wiphy.max_scan_ssids) || nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, 
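/*
 * Illustrative note, not part of nl80211.c itself: nl80211_send_wiphy() is a
 * resumable state machine.  struct nl80211_dump_wiphy_state records where
 * the previous dump message stopped (split_start selects the switch case;
 * band_start, chan_start and capa_start record the position inside it), and
 * every case ends with roughly:
 *
 *   state->split_start++;
 *   if (state->split)
 *           break;
 *   fallthrough;
 *
 * so old userspace (which does not send NL80211_ATTR_SPLIT_WIPHY_DUMP) gets
 * everything in one oversized message, while split-aware userspace gets one
 * chunk per netlink message and the dump resumes at the recorded case on
 * the next callback invocation.
 */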
rdev->wiphy.max_sched_scan_ssids) || nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, rdev->wiphy.max_scan_ie_len) || nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, rdev->wiphy.max_sched_scan_ie_len) || nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS, rdev->wiphy.max_match_sets)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) && nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) && nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) && nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP)) goto nla_put_failure; state->split_start++; if (state->split) break; fallthrough; case 1: if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * rdev->wiphy.n_cipher_suites, rdev->wiphy.cipher_suites)) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, rdev->wiphy.max_num_pmkids)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, rdev->wiphy.available_antennas_tx) || nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, rdev->wiphy.available_antennas_rx)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, rdev->wiphy.probe_resp_offload)) goto nla_put_failure; if ((rdev->wiphy.available_antennas_tx || rdev->wiphy.available_antennas_rx) && rdev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; int res; res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); if (!res) { if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant) || nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant)) goto nla_put_failure; } } state->split_start++; if (state->split) break; fallthrough; case 2: if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, rdev->wiphy.interface_modes)) goto nla_put_failure; state->split_start++; if (state->split) break; fallthrough; case 3: nl_bands = nla_nest_start_noflag(msg, NL80211_ATTR_WIPHY_BANDS); if (!nl_bands) goto nla_put_failure; for (band = state->band_start; band < (state->split ? 
NUM_NL80211_BANDS : NL80211_BAND_60GHZ + 1); band++) { struct ieee80211_supported_band *sband; /* omit higher bands for ancient software */ if (band > NL80211_BAND_5GHZ && !state->split) break; sband = rdev->wiphy.bands[band]; if (!sband) continue; nl_band = nla_nest_start_noflag(msg, band); if (!nl_band) goto nla_put_failure; switch (state->chan_start) { case 0: if (nl80211_send_band_rateinfo(msg, sband, state->split)) goto nla_put_failure; state->chan_start++; if (state->split) break; fallthrough; default: /* add frequencies */ nl_freqs = nla_nest_start_noflag(msg, NL80211_BAND_ATTR_FREQS); if (!nl_freqs) goto nla_put_failure; for (i = state->chan_start - 1; i < sband->n_channels; i++) { nl_freq = nla_nest_start_noflag(msg, i); if (!nl_freq) goto nla_put_failure; chan = &sband->channels[i]; if (nl80211_msg_put_channel( msg, &rdev->wiphy, chan, state->split)) goto nla_put_failure; nla_nest_end(msg, nl_freq); if (state->split) break; } if (i < sband->n_channels) state->chan_start = i + 2; else state->chan_start = 0; nla_nest_end(msg, nl_freqs); } nla_nest_end(msg, nl_band); if (state->split) { /* start again here */ if (state->chan_start) band--; break; } } nla_nest_end(msg, nl_bands); if (band < NUM_NL80211_BANDS) state->band_start = band + 1; else state->band_start = 0; /* if bands & channels are done, continue outside */ if (state->band_start == 0 && state->chan_start == 0) state->split_start++; if (state->split) break; fallthrough; case 4: nl_cmds = nla_nest_start_noflag(msg, NL80211_ATTR_SUPPORTED_COMMANDS); if (!nl_cmds) goto nla_put_failure; i = nl80211_add_commands_unsplit(rdev, msg); if (i < 0) goto nla_put_failure; if (state->split) { CMD(crit_proto_start, CRIT_PROTOCOL_START); CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) CMD(channel_switch, CHANNEL_SWITCH); CMD(set_qos_map, SET_QOS_MAP); if (rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION) CMD(add_tx_ts, ADD_TX_TS); CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST); CMD(update_connect_params, UPDATE_CONNECT_PARAMS); CMD(update_ft_ies, UPDATE_FT_IES); if (rdev->wiphy.sar_capa) CMD(set_sar_specs, SET_SAR_SPECS); } #undef CMD nla_nest_end(msg, nl_cmds); state->split_start++; if (state->split) break; fallthrough; case 5: if (rdev->ops->remain_on_channel && (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, rdev->wiphy.max_remain_on_channel_duration)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) goto nla_put_failure; state->split_start++; if (state->split) break; fallthrough; case 6: #ifdef CONFIG_PM if (nl80211_send_wowlan(msg, rdev, state->split)) goto nla_put_failure; state->split_start++; if (state->split) break; #else state->split_start++; #endif fallthrough; case 7: if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, rdev->wiphy.software_iftypes)) goto nla_put_failure; if (nl80211_put_iface_combinations(&rdev->wiphy, msg, state->split)) goto nla_put_failure; state->split_start++; if (state->split) break; fallthrough; case 8: if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, rdev->wiphy.ap_sme_capa)) goto nla_put_failure; features = rdev->wiphy.features; /* * We can only add the per-channel limit information if the * dump is split, otherwise it makes it too big. Therefore * only advertise it in that case. 
*/ if (state->split) features |= NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features)) goto nla_put_failure; if (rdev->wiphy.ht_capa_mod_mask && nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, sizeof(*rdev->wiphy.ht_capa_mod_mask), rdev->wiphy.ht_capa_mod_mask)) goto nla_put_failure; if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME && rdev->wiphy.max_acl_mac_addrs && nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, rdev->wiphy.max_acl_mac_addrs)) goto nla_put_failure; /* * Any information below this point is only available to * applications that can deal with it being split. This * helps ensure that newly added capabilities don't break * older tools by overrunning their buffers. * * We still increment split_start so that in the split * case we'll continue with more data in the next round, * but break unconditionally so unsplit data stops here. */ if (state->split) state->split_start++; else state->split_start = 0; break; case 9: if (nl80211_send_mgmt_stypes(msg, mgmt_stypes)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, rdev->wiphy.max_sched_scan_plans) || nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, rdev->wiphy.max_sched_scan_plan_interval) || nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, rdev->wiphy.max_sched_scan_plan_iterations)) goto nla_put_failure; if (rdev->wiphy.extended_capabilities && (nla_put(msg, NL80211_ATTR_EXT_CAPA, rdev->wiphy.extended_capabilities_len, rdev->wiphy.extended_capabilities) || nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, rdev->wiphy.extended_capabilities_len, rdev->wiphy.extended_capabilities_mask))) goto nla_put_failure; if (rdev->wiphy.vht_capa_mod_mask && nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK, sizeof(*rdev->wiphy.vht_capa_mod_mask), rdev->wiphy.vht_capa_mod_mask)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, rdev->wiphy.perm_addr)) goto nla_put_failure; if (!is_zero_ether_addr(rdev->wiphy.addr_mask) && nla_put(msg, NL80211_ATTR_MAC_MASK, ETH_ALEN, rdev->wiphy.addr_mask)) goto nla_put_failure; if (rdev->wiphy.n_addresses > 1) { void *attr; attr = nla_nest_start(msg, NL80211_ATTR_MAC_ADDRS); if (!attr) goto nla_put_failure; for (i = 0; i < rdev->wiphy.n_addresses; i++) if (nla_put(msg, i + 1, ETH_ALEN, rdev->wiphy.addresses[i].addr)) goto nla_put_failure; nla_nest_end(msg, attr); } state->split_start++; break; case 10: if (nl80211_send_coalesce(msg, rdev)) goto nla_put_failure; if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) && (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) || nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ))) goto nla_put_failure; if (rdev->wiphy.max_ap_assoc_sta && nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA, rdev->wiphy.max_ap_assoc_sta)) goto nla_put_failure; state->split_start++; break; case 11: if (rdev->wiphy.n_vendor_commands) { const struct nl80211_vendor_cmd_info *info; struct nlattr *nested; nested = nla_nest_start_noflag(msg, NL80211_ATTR_VENDOR_DATA); if (!nested) goto nla_put_failure; for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) { info = &rdev->wiphy.vendor_commands[i].info; if (nla_put(msg, i + 1, sizeof(*info), info)) goto nla_put_failure; } nla_nest_end(msg, nested); } if (rdev->wiphy.n_vendor_events) { const struct nl80211_vendor_cmd_info *info; struct nlattr *nested; nested = nla_nest_start_noflag(msg, NL80211_ATTR_VENDOR_EVENTS); if (!nested) goto nla_put_failure; for (i = 0; i < rdev->wiphy.n_vendor_events; i++) { info = &rdev->wiphy.vendor_events[i]; if (nla_put(msg, i + 1, sizeof(*info), 
info)) goto nla_put_failure; } nla_nest_end(msg, nested); } state->split_start++; break; case 12: if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH && nla_put_u8(msg, NL80211_ATTR_MAX_CSA_COUNTERS, rdev->wiphy.max_num_csa_counters)) goto nla_put_failure; if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; if (rdev->wiphy.max_sched_scan_reqs && nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_MAX_REQS, rdev->wiphy.max_sched_scan_reqs)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_EXT_FEATURES, sizeof(rdev->wiphy.ext_features), rdev->wiphy.ext_features)) goto nla_put_failure; if (rdev->wiphy.bss_select_support) { struct nlattr *nested; u32 bss_select_support = rdev->wiphy.bss_select_support; nested = nla_nest_start_noflag(msg, NL80211_ATTR_BSS_SELECT); if (!nested) goto nla_put_failure; i = 0; while (bss_select_support) { if ((bss_select_support & 1) && nla_put_flag(msg, i)) goto nla_put_failure; i++; bss_select_support >>= 1; } nla_nest_end(msg, nested); } state->split_start++; break; case 13: if (rdev->wiphy.num_iftype_ext_capab && rdev->wiphy.iftype_ext_capab) { struct nlattr *nested_ext_capab, *nested; nested = nla_nest_start_noflag(msg, NL80211_ATTR_IFTYPE_EXT_CAPA); if (!nested) goto nla_put_failure; for (i = state->capa_start; i < rdev->wiphy.num_iftype_ext_capab; i++) { const struct wiphy_iftype_ext_capab *capab; capab = &rdev->wiphy.iftype_ext_capab[i]; nested_ext_capab = nla_nest_start_noflag(msg, i); if (!nested_ext_capab || nla_put_u32(msg, NL80211_ATTR_IFTYPE, capab->iftype) || nla_put(msg, NL80211_ATTR_EXT_CAPA, capab->extended_capabilities_len, capab->extended_capabilities) || nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, capab->extended_capabilities_len, capab->extended_capabilities_mask)) goto nla_put_failure; if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO && (nla_put_u16(msg, NL80211_ATTR_EML_CAPABILITY, capab->eml_capabilities) || nla_put_u16(msg, NL80211_ATTR_MLD_CAPA_AND_OPS, capab->mld_capa_and_ops))) goto nla_put_failure; nla_nest_end(msg, nested_ext_capab); if (state->split) break; } nla_nest_end(msg, nested); if (i < rdev->wiphy.num_iftype_ext_capab) { state->capa_start = i + 1; break; } } if (nla_put_u32(msg, NL80211_ATTR_BANDS, rdev->wiphy.nan_supported_bands)) goto nla_put_failure; if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) { struct cfg80211_txq_stats txqstats = {}; int res; res = rdev_get_txq_stats(rdev, NULL, &txqstats); if (!res && !nl80211_put_txq_stats(msg, &txqstats, NL80211_ATTR_TXQ_STATS)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_LIMIT, rdev->wiphy.txq_limit)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_MEMORY_LIMIT, rdev->wiphy.txq_memory_limit)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_TXQ_QUANTUM, rdev->wiphy.txq_quantum)) goto nla_put_failure; } state->split_start++; break; case 14: if (nl80211_send_pmsr_capa(rdev, msg)) goto nla_put_failure; state->split_start++; break; case 15: if (rdev->wiphy.akm_suites && nla_put(msg, NL80211_ATTR_AKM_SUITES, sizeof(u32) * rdev->wiphy.n_akm_suites, rdev->wiphy.akm_suites)) goto nla_put_failure; if (nl80211_put_iftype_akm_suites(rdev, msg)) goto nla_put_failure; if (nl80211_put_tid_config_support(rdev, msg)) goto nla_put_failure; state->split_start++; break; case 16: if (nl80211_put_sar_specs(rdev, msg)) goto nla_put_failure; if (nl80211_put_mbssid_support(&rdev->wiphy, msg)) goto nla_put_failure; if (nla_put_u16(msg, NL80211_ATTR_MAX_NUM_AKM_SUITES, 
rdev->wiphy.max_num_akm_suites)) goto nla_put_failure; if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO) nla_put_flag(msg, NL80211_ATTR_MLO_SUPPORT); if (rdev->wiphy.hw_timestamp_max_peers && nla_put_u16(msg, NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS, rdev->wiphy.hw_timestamp_max_peers)) goto nla_put_failure; /* done */ state->split_start = 0; break; } finish: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_wiphy_parse(struct sk_buff *skb, struct netlink_callback *cb, struct nl80211_dump_wiphy_state *state) { struct nlattr **tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL); int ret; if (!tb) return -ENOMEM; ret = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, nl80211_fam.maxattr, nl80211_policy, NULL); /* ignore parse errors for backward compatibility */ if (ret) { ret = 0; goto out; } state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; if (tb[NL80211_ATTR_WIPHY]) state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]); if (tb[NL80211_ATTR_WDEV]) state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32; if (tb[NL80211_ATTR_IFINDEX]) { struct net_device *netdev; struct cfg80211_registered_device *rdev; int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) { ret = -ENODEV; goto out; } if (netdev->ieee80211_ptr) { rdev = wiphy_to_rdev( netdev->ieee80211_ptr->wiphy); state->filter_wiphy = rdev->wiphy_idx; } } ret = 0; out: kfree(tb); return ret; } static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0, ret; struct nl80211_dump_wiphy_state *state = (void *)cb->args[0]; struct cfg80211_registered_device *rdev; rtnl_lock(); if (!state) { state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) { rtnl_unlock(); return -ENOMEM; } state->filter_wiphy = -1; ret = nl80211_dump_wiphy_parse(skb, cb, state); if (ret) { kfree(state); rtnl_unlock(); return ret; } cb->args[0] = (long)state; } list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; if (++idx <= state->start) continue; if (state->filter_wiphy != -1 && state->filter_wiphy != rdev->wiphy_idx) continue; wiphy_lock(&rdev->wiphy); /* attempt to fit multiple wiphy data chunks into the skb */ do { ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, state); if (ret < 0) { /* * If sending the wiphy data didn't fit (ENOBUFS * or EMSGSIZE returned), this SKB is still * empty (so it's not too big because another * wiphy dataset is already in the skb) and * we've not tried to adjust the dump allocation * yet ... then adjust the alloc size to be * bigger, and return 1 but with the empty skb. * This results in an empty message being RX'ed * in userspace, but that is ignored. * * We can then retry with the larger buffer. 
*/ if ((ret == -ENOBUFS || ret == -EMSGSIZE) && !skb->len && !state->split && cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; state->split_start = 0; wiphy_unlock(&rdev->wiphy); rtnl_unlock(); return 1; } idx--; break; } } while (state->split_start > 0); wiphy_unlock(&rdev->wiphy); break; } rtnl_unlock(); state->start = idx; return skb->len; } static int nl80211_dump_wiphy_done(struct netlink_callback *cb) { kfree((void *)cb->args[0]); return 0; } static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nl80211_dump_wiphy_state state = {}; msg = nlmsg_new(4096, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, msg, info->snd_portid, info->snd_seq, 0, &state) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { [NL80211_TXQ_ATTR_QUEUE] = { .type = NLA_U8 }, [NL80211_TXQ_ATTR_TXOP] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMIN] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMAX] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_AIFS] = { .type = NLA_U8 }, }; static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { u8 ac; if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); if (ac >= NL80211_NUM_ACS) return -EINVAL; txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS); return 0; } static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev) { /* * You can only set the channel explicitly for some interfaces, * most have their channel managed via their respective * "establish a connection" command (connect, join, ...) * * For AP/GO and mesh mode, the channel can be set with the * channel userspace API, but is only stored and passed to the * low-level driver when the AP starts or the mesh is joined. * This is for backward compatibility, userspace can also give * the channel in the start-ap or join-mesh commands instead. * * Monitors are special as they are normally slaved to * whatever else is going on, so they have their own special * operation to set the monitor channel if possible. 
*/ return !wdev || wdev->iftype == NL80211_IFTYPE_AP || wdev->iftype == NL80211_IFTYPE_MESH_POINT || wdev->iftype == NL80211_IFTYPE_MONITOR || wdev->iftype == NL80211_IFTYPE_P2P_GO; } static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev, struct genl_info *info, const struct cfg80211_chan_def *chandef, u16 *punct_bitmap) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT)) return -EINVAL; *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]); if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef)) return -EINVAL; return 0; } int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_chan_def *chandef) { struct netlink_ext_ack *extack = info->extack; struct nlattr **attrs = info->attrs; u32 control_freq; if (!attrs[NL80211_ATTR_WIPHY_FREQ]) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ], "Frequency is missing"); return -EINVAL; } control_freq = MHZ_TO_KHZ( nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); if (info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]) control_freq += nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]); memset(chandef, 0, sizeof(*chandef)); chandef->chan = ieee80211_get_channel_khz(&rdev->wiphy, control_freq); chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = KHZ_TO_MHZ(control_freq); chandef->freq1_offset = control_freq % 1000; chandef->center_freq2 = 0; /* Primary channel not allowed */ if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ], "Channel is disabled"); return -EINVAL; } if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { enum nl80211_channel_type chantype; chantype = nla_get_u32(attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); switch (chantype) { case NL80211_CHAN_NO_HT: case NL80211_CHAN_HT20: case NL80211_CHAN_HT40PLUS: case NL80211_CHAN_HT40MINUS: cfg80211_chandef_create(chandef, chandef->chan, chantype); /* user input for center_freq is incorrect */ if (attrs[NL80211_ATTR_CENTER_FREQ1] && chandef->center_freq1 != nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1])) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_CENTER_FREQ1], "bad center frequency 1"); return -EINVAL; } /* center_freq2 must be zero */ if (attrs[NL80211_ATTR_CENTER_FREQ2] && nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2])) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_CENTER_FREQ2], "center frequency 2 can't be used"); return -EINVAL; } break; default: NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE], "invalid channel type"); return -EINVAL; } } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) { chandef->width = nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]); if (chandef->chan->band == NL80211_BAND_S1GHZ) { /* User input error for channel width doesn't match channel */ if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) { NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_CHANNEL_WIDTH], "bad channel width"); return -EINVAL; } } if (attrs[NL80211_ATTR_CENTER_FREQ1]) { chandef->center_freq1 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]); if (attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET]) chandef->freq1_offset = nla_get_u32( attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET]); else chandef->freq1_offset = 0; } if (attrs[NL80211_ATTR_CENTER_FREQ2]) chandef->center_freq2 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]); } if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) { chandef->edmg.channels = nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]); if 
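/*
 * Illustrative note, not part of nl80211.c itself: nl80211_parse_chandef()
 * accepts two userspace conventions for the same channel.  The legacy form
 * gives only the control channel plus an HT channel type; the newer form
 * gives an explicit width and center frequency.  For a 40 MHz channel with
 * the primary at 5180 MHz the two would be, roughly:
 *
 *   legacy: NL80211_ATTR_WIPHY_FREQ = 5180
 *           NL80211_ATTR_WIPHY_CHANNEL_TYPE = NL80211_CHAN_HT40PLUS
 *
 *   new:    NL80211_ATTR_WIPHY_FREQ = 5180
 *           NL80211_ATTR_CHANNEL_WIDTH = NL80211_CHAN_WIDTH_40
 *           NL80211_ATTR_CENTER_FREQ1 = 5190
 *
 * Internally the control frequency is tracked in kHz (MHZ_TO_KHZ() plus the
 * optional *_FREQ_OFFSET attributes) so that S1G channels with sub-MHz
 * spacing can also be represented.
 */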
(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]) chandef->edmg.bw_config = nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]); } else { chandef->edmg.bw_config = 0; chandef->edmg.channels = 0; } if (!cfg80211_chandef_valid(chandef)) { NL_SET_ERR_MSG(extack, "invalid channel definition"); return -EINVAL; } if (!cfg80211_chandef_usable(&rdev->wiphy, chandef, IEEE80211_CHAN_DISABLED)) { NL_SET_ERR_MSG(extack, "(extension) channel is disabled"); return -EINVAL; } if ((chandef->width == NL80211_CHAN_WIDTH_5 || chandef->width == NL80211_CHAN_WIDTH_10) && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ)) { NL_SET_ERR_MSG(extack, "5/10 MHz not supported"); return -EINVAL; } return 0; } static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, struct net_device *dev, struct genl_info *info, int _link_id) { struct cfg80211_chan_def chandef; int result; enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR; struct wireless_dev *wdev = NULL; int link_id = _link_id; if (dev) wdev = dev->ieee80211_ptr; if (!nl80211_can_set_dev_channel(wdev)) return -EOPNOTSUPP; if (wdev) iftype = wdev->iftype; if (link_id < 0) { if (wdev && wdev->valid_links) return -EINVAL; link_id = 0; } result = nl80211_parse_chandef(rdev, info, &chandef); if (result) return result; switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, iftype)) return -EINVAL; if (wdev->links[link_id].ap.beacon_interval) { struct ieee80211_channel *cur_chan; if (!dev || !rdev->ops->set_ap_chanwidth || !(rdev->wiphy.features & NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) return -EBUSY; /* Only allow dynamic channel width changes */ cur_chan = wdev->links[link_id].ap.chandef.chan; if (chandef.chan != cur_chan) return -EBUSY; result = rdev_set_ap_chanwidth(rdev, dev, link_id, &chandef); if (result) return result; wdev->links[link_id].ap.chandef = chandef; } else { wdev->u.ap.preset_chandef = chandef; } return 0; case NL80211_IFTYPE_MESH_POINT: return cfg80211_set_mesh_channel(rdev, wdev, &chandef); case NL80211_IFTYPE_MONITOR: return cfg80211_set_monitor_channel(rdev, &chandef); default: break; } return -EINVAL; } static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int link_id = nl80211_link_id_or_invalid(info->attrs); struct net_device *netdev = info->user_ptr[1]; int ret; wdev_lock(netdev->ieee80211_ptr); ret = __nl80211_set_channel(rdev, netdev, info, link_id); wdev_unlock(netdev->ieee80211_ptr); return ret; } static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = NULL; struct net_device *netdev = NULL; struct wireless_dev *wdev; int result = 0, rem_txq_params = 0; struct nlattr *nl_txq_params; u32 changed; u8 retry_short = 0, retry_long = 0; u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0; rtnl_lock(); /* * Try to find the wiphy and netdev. Normally this * function shouldn't need the netdev, but this is * done for backward compatibility -- previously * setting the channel was done per wiphy, but now * it is per netdev. Previous userland like hostapd * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! 
*/ if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); netdev = __dev_get_by_index(genl_info_net(info), ifindex); if (netdev && netdev->ieee80211_ptr) rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy); else netdev = NULL; } if (!netdev) { rdev = __cfg80211_rdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(rdev)) { rtnl_unlock(); return PTR_ERR(rdev); } wdev = NULL; netdev = NULL; result = 0; } else wdev = netdev->ieee80211_ptr; wiphy_lock(&rdev->wiphy); /* * end workaround code, by now the rdev is available * and locked, and wdev may or may not be NULL. */ if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); rtnl_unlock(); if (result) goto out; if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; if (!rdev->ops->set_txq_params) { result = -EOPNOTSUPP; goto out; } if (!netdev) { result = -EINVAL; goto out; } if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { result = -EINVAL; goto out; } if (!netif_running(netdev)) { result = -ENETDOWN; goto out; } nla_for_each_nested(nl_txq_params, info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], rem_txq_params) { result = nla_parse_nested_deprecated(tb, NL80211_TXQ_ATTR_MAX, nl_txq_params, txq_params_policy, info->extack); if (result) goto out; result = parse_txq_params(tb, &txq_params); if (result) goto out; txq_params.link_id = nl80211_link_id_or_invalid(info->attrs); wdev_lock(netdev->ieee80211_ptr); if (txq_params.link_id >= 0 && !(netdev->ieee80211_ptr->valid_links & BIT(txq_params.link_id))) result = -ENOLINK; else if (txq_params.link_id >= 0 && !netdev->ieee80211_ptr->valid_links) result = -EINVAL; else result = rdev_set_txq_params(rdev, netdev, &txq_params); wdev_unlock(netdev->ieee80211_ptr); if (result) goto out; } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { int link_id = nl80211_link_id_or_invalid(info->attrs); if (wdev) { wdev_lock(wdev); result = __nl80211_set_channel( rdev, nl80211_can_set_dev_channel(wdev) ? 
netdev : NULL, info, link_id); wdev_unlock(wdev); } else { result = __nl80211_set_channel(rdev, netdev, info, link_id); } if (result) goto out; } if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { struct wireless_dev *txp_wdev = wdev; enum nl80211_tx_power_setting type; int idx, mbm = 0; if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER)) txp_wdev = NULL; if (!rdev->ops->set_tx_power) { result = -EOPNOTSUPP; goto out; } idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING; type = nla_get_u32(info->attrs[idx]); if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] && (type != NL80211_TX_POWER_AUTOMATIC)) { result = -EINVAL; goto out; } if (type != NL80211_TX_POWER_AUTOMATIC) { idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL; mbm = nla_get_u32(info->attrs[idx]); } result = rdev_set_tx_power(rdev, txp_wdev, type, mbm); if (result) goto out; } if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { u32 tx_ant, rx_ant; if ((!rdev->wiphy.available_antennas_tx && !rdev->wiphy.available_antennas_rx) || !rdev->ops->set_antenna) { result = -EOPNOTSUPP; goto out; } tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]); rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]); /* reject antenna configurations which don't match the * available antenna masks, except for the "all" mask */ if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) || (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) { result = -EINVAL; goto out; } tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; result = rdev_set_antenna(rdev, tx_ant, rx_ant); if (result) goto out; } changed = 0; if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { retry_short = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); changed |= WIPHY_PARAM_RETRY_SHORT; } if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { retry_long = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); changed |= WIPHY_PARAM_RETRY_LONG; } if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { frag_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); if (frag_threshold < 256) { result = -EINVAL; goto out; } if (frag_threshold != (u32) -1) { /* * Fragments (apart from the last one) are required to * have even length. Make the fragmentation code * simpler by stripping LSB should someone try to use * odd threshold value. 
*/ frag_threshold &= ~0x1; } changed |= WIPHY_PARAM_FRAG_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { rts_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); changed |= WIPHY_PARAM_RTS_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { result = -EINVAL; goto out; } coverage_class = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); changed |= WIPHY_PARAM_COVERAGE_CLASS; } if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) { result = -EOPNOTSUPP; goto out; } changed |= WIPHY_PARAM_DYN_ACK; } if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) { result = -EOPNOTSUPP; goto out; } txq_limit = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_LIMIT]); changed |= WIPHY_PARAM_TXQ_LIMIT; } if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) { result = -EOPNOTSUPP; goto out; } txq_memory_limit = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]); changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT; } if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_TXQS)) { result = -EOPNOTSUPP; goto out; } txq_quantum = nla_get_u32( info->attrs[NL80211_ATTR_TXQ_QUANTUM]); changed |= WIPHY_PARAM_TXQ_QUANTUM; } if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; u8 old_coverage_class; u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum; if (!rdev->ops->set_wiphy_params) { result = -EOPNOTSUPP; goto out; } old_retry_short = rdev->wiphy.retry_short; old_retry_long = rdev->wiphy.retry_long; old_frag_threshold = rdev->wiphy.frag_threshold; old_rts_threshold = rdev->wiphy.rts_threshold; old_coverage_class = rdev->wiphy.coverage_class; old_txq_limit = rdev->wiphy.txq_limit; old_txq_memory_limit = rdev->wiphy.txq_memory_limit; old_txq_quantum = rdev->wiphy.txq_quantum; if (changed & WIPHY_PARAM_RETRY_SHORT) rdev->wiphy.retry_short = retry_short; if (changed & WIPHY_PARAM_RETRY_LONG) rdev->wiphy.retry_long = retry_long; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) rdev->wiphy.frag_threshold = frag_threshold; if (changed & WIPHY_PARAM_RTS_THRESHOLD) rdev->wiphy.rts_threshold = rts_threshold; if (changed & WIPHY_PARAM_COVERAGE_CLASS) rdev->wiphy.coverage_class = coverage_class; if (changed & WIPHY_PARAM_TXQ_LIMIT) rdev->wiphy.txq_limit = txq_limit; if (changed & WIPHY_PARAM_TXQ_MEMORY_LIMIT) rdev->wiphy.txq_memory_limit = txq_memory_limit; if (changed & WIPHY_PARAM_TXQ_QUANTUM) rdev->wiphy.txq_quantum = txq_quantum; result = rdev_set_wiphy_params(rdev, changed); if (result) { rdev->wiphy.retry_short = old_retry_short; rdev->wiphy.retry_long = old_retry_long; rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; rdev->wiphy.coverage_class = old_coverage_class; rdev->wiphy.txq_limit = old_txq_limit; rdev->wiphy.txq_memory_limit = old_txq_memory_limit; rdev->wiphy.txq_quantum = old_txq_quantum; goto out; } } result = 0; out: wiphy_unlock(&rdev->wiphy); return result; } int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef) { if (WARN_ON(!cfg80211_chandef_valid(chandef))) return -EINVAL; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chandef->chan->center_freq)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ_OFFSET, chandef->chan->freq_offset)) return 
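/*
 * Illustrative note, not part of nl80211.c itself: the WIPHY_PARAM_*
 * handling above uses a collect/snapshot/rollback pattern -- parse each
 * attribute into a local plus a bit in 'changed', save the current wiphy
 * values, apply the new ones, and undo everything if the driver's
 * set_wiphy_params() rejects the change, so a partially-applied request
 * never stays visible.  Schematically, for one hypothetical parameter:
 *
 *   old = wiphy_param;
 *   if (changed & WIPHY_PARAM_FOO)
 *           wiphy_param = new_param;
 *   err = rdev_set_wiphy_params(rdev, changed);
 *   if (err)
 *           wiphy_param = old;
 */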
-ENOBUFS; switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: if (nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, cfg80211_get_chandef_type(chandef))) return -ENOBUFS; break; default: break; } if (nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, chandef->width)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ1, chandef->center_freq1)) return -ENOBUFS; if (chandef->center_freq2 && nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2)) return -ENOBUFS; return 0; } EXPORT_SYMBOL(nl80211_send_chandef); static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd) { struct net_device *dev = wdev->netdev; void *hdr; WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE && cmd != NL80211_CMD_DEL_INTERFACE && cmd != NL80211_CMD_SET_INTERFACE); hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; if (dev && (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name))) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) || nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ (cfg80211_rdev_list_generation << 2)) || nla_put_u8(msg, NL80211_ATTR_4ADDR, wdev->use_4addr)) goto nla_put_failure; if (rdev->ops->get_channel && !wdev->valid_links) { struct cfg80211_chan_def chandef = {}; int ret; ret = rdev_get_channel(rdev, wdev, 0, &chandef); if (ret == 0 && nl80211_send_chandef(msg, &chandef)) goto nla_put_failure; } if (rdev->ops->get_tx_power) { int dbm, ret; ret = rdev_get_tx_power(rdev, wdev, &dbm); if (ret == 0 && nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, DBM_TO_MBM(dbm))) goto nla_put_failure; } wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (wdev->u.ap.ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->u.ap.ssid_len, wdev->u.ap.ssid)) goto nla_put_failure_locked; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->u.client.ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->u.client.ssid_len, wdev->u.client.ssid)) goto nla_put_failure_locked; break; case NL80211_IFTYPE_ADHOC: if (wdev->u.ibss.ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->u.ibss.ssid_len, wdev->u.ibss.ssid)) goto nla_put_failure_locked; break; default: /* nothing */ break; } wdev_unlock(wdev); if (rdev->ops->get_txq_stats) { struct cfg80211_txq_stats txqstats = {}; int ret = rdev_get_txq_stats(rdev, wdev, &txqstats); if (ret == 0 && !nl80211_put_txq_stats(msg, &txqstats, NL80211_ATTR_TXQ_STATS)) goto nla_put_failure; } if (wdev->valid_links) { unsigned int link_id; struct nlattr *links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); if (!links) goto nla_put_failure; for_each_valid_link(wdev, link_id) { struct nlattr *link = nla_nest_start(msg, link_id + 1); struct cfg80211_chan_def chandef = {}; int ret; if (!link) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev->links[link_id].addr)) goto nla_put_failure; ret = rdev_get_channel(rdev, wdev, link_id, &chandef); if (ret == 0 && nl80211_send_chandef(msg, &chandef)) goto nla_put_failure; nla_nest_end(msg, 
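/*
 * Illustrative note, not part of nl80211.c itself: for an MLD interface the
 * per-link data is nested rather than flat -- the loop just above emits,
 * once per valid link, a nest of the shape:
 *
 *   NL80211_ATTR_MLO_LINKS
 *       1: { NL80211_ATTR_MLO_LINK_ID, NL80211_ATTR_MAC, chandef attrs }
 *       2: { ... }
 *
 * while the flat, wdev-level channel lookup earlier in
 * nl80211_send_iface() is only done for non-MLO interfaces
 * (!wdev->valid_links).
 */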
link); } nla_nest_end(msg, links); } genlmsg_end(msg, hdr); return 0; nla_put_failure_locked: wdev_unlock(wdev); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) { int wp_idx = 0; int if_idx = 0; int wp_start = cb->args[0]; int if_start = cb->args[1]; int filter_wiphy = -1; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; int ret; rtnl_lock(); if (!cb->args[2]) { struct nl80211_dump_wiphy_state state = { .filter_wiphy = -1, }; ret = nl80211_dump_wiphy_parse(skb, cb, &state); if (ret) goto out_unlock; filter_wiphy = state.filter_wiphy; /* * if filtering, set cb->args[2] to +1 since 0 is the default * value needed to determine that parsing is necessary. */ if (filter_wiphy >= 0) cb->args[2] = filter_wiphy + 1; else cb->args[2] = -1; } else if (cb->args[2] > 0) { filter_wiphy = cb->args[2] - 1; } list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; if (wp_idx < wp_start) { wp_idx++; continue; } if (filter_wiphy >= 0 && filter_wiphy != rdev->wiphy_idx) continue; if_idx = 0; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (if_idx < if_start) { if_idx++; continue; } if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) { goto out; } if_idx++; } wp_idx++; } out: cb->args[0] = wp_idx; cb->args[1] = if_idx; ret = skb->len; out_unlock: rtnl_unlock(); return ret; } static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = { [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG }, }; static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) { struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1]; int flag; *mntrflags = 0; if (!nla) return -EINVAL; if (nla_parse_nested_deprecated(flags, NL80211_MNTR_FLAG_MAX, nla, mntr_flags_policy, NULL)) return -EINVAL; for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++) if (flags[flag]) *mntrflags |= (1<<flag); *mntrflags |= MONITOR_FLAG_CHANGED; return 0; } static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev, enum nl80211_iftype type, struct genl_info *info, struct vif_params *params) { bool change = false; int err; if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { if (type != NL80211_IFTYPE_MONITOR) return -EINVAL; err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], &params->flags); if (err) return err; change = true; } if (params->flags & MONITOR_FLAG_ACTIVE && !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) { const u8 *mumimo_groups; u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; if (type != NL80211_IFTYPE_MONITOR) return -EINVAL; if 
(!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) return -EOPNOTSUPP; mumimo_groups = nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]); /* bits 0 and 63 are reserved and must be zero */ if ((mumimo_groups[0] & BIT(0)) || (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(7))) return -EINVAL; params->vht_mumimo_groups = mumimo_groups; change = true; } if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) { u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; if (type != NL80211_IFTYPE_MONITOR) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) return -EOPNOTSUPP; params->vht_mumimo_follow_addr = nla_data(info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]); change = true; } return change ? 1 : 0; } static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev, struct net_device *netdev, u8 use_4addr, enum nl80211_iftype iftype) { if (!use_4addr) { if (netdev && netif_is_bridge_port(netdev)) return -EBUSY; return 0; } switch (iftype) { case NL80211_IFTYPE_AP_VLAN: if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP) return 0; break; case NL80211_IFTYPE_STATION: if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION) return 0; break; default: break; } return -EOPNOTSUPP; } static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct vif_params params; int err; enum nl80211_iftype otype, ntype; struct net_device *dev = info->user_ptr[1]; bool change = false; memset(&params, 0, sizeof(params)); otype = ntype = dev->ieee80211_ptr->iftype; if (info->attrs[NL80211_ATTR_IFTYPE]) { ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (otype != ntype) change = true; } if (info->attrs[NL80211_ATTR_MESH_ID]) { struct wireless_dev *wdev = dev->ieee80211_ptr; if (ntype != NL80211_IFTYPE_MESH_POINT) return -EINVAL; if (netif_running(dev)) return -EBUSY; wdev_lock(wdev); BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); wdev->u.mesh.id_up_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); memcpy(wdev->u.mesh.id, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), wdev->u.mesh.id_up_len); wdev_unlock(wdev); } if (info->attrs[NL80211_ATTR_4ADDR]) { params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); change = true; err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype); if (err) return err; } else { params.use_4addr = -1; } err = nl80211_parse_mon_options(rdev, ntype, info, &params); if (err < 0) return err; if (err > 0) change = true; if (change) err = cfg80211_change_iface(rdev, dev, ntype, &params); else err = 0; if (!err && params.use_4addr != -1) dev->ieee80211_ptr->use_4addr = params.use_4addr; if (change && !err) { struct wireless_dev *wdev = dev->ieee80211_ptr; nl80211_notify_iface(rdev, wdev, NL80211_CMD_SET_INTERFACE); } return err; } static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct vif_params params; struct wireless_dev *wdev; struct sk_buff *msg; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; memset(&params, 0, sizeof(params)); if (!info->attrs[NL80211_ATTR_IFNAME]) return -EINVAL; if (info->attrs[NL80211_ATTR_IFTYPE]) type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (!rdev->ops->add_virtual_intf) return -EOPNOTSUPP; if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN || rdev->wiphy.features & NL80211_FEATURE_MAC_ON_CREATE) && info->attrs[NL80211_ATTR_MAC]) { nla_memcpy(params.macaddr, 
info->attrs[NL80211_ATTR_MAC], ETH_ALEN); if (!is_valid_ether_addr(params.macaddr)) return -EADDRNOTAVAIL; } if (info->attrs[NL80211_ATTR_4ADDR]) { params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type); if (err) return err; } if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0)) return -EOPNOTSUPP; err = nl80211_parse_mon_options(rdev, type, info, &params); if (err < 0) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), NET_NAME_USER, type, &params); if (WARN_ON(!wdev)) { nlmsg_free(msg); return -EPROTO; } else if (IS_ERR(wdev)) { nlmsg_free(msg); return PTR_ERR(wdev); } if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) wdev->owner_nlportid = info->snd_portid; switch (type) { case NL80211_IFTYPE_MESH_POINT: if (!info->attrs[NL80211_ATTR_MESH_ID]) break; wdev_lock(wdev); BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); wdev->u.mesh.id_up_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); memcpy(wdev->u.mesh.id, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), wdev->u.mesh.id_up_len); wdev_unlock(wdev); break; case NL80211_IFTYPE_NAN: case NL80211_IFTYPE_P2P_DEVICE: /* * P2P Device and NAN do not have a netdev, so don't go * through the netdev notifier and must be added here */ cfg80211_init_wdev(wdev); cfg80211_register_wdev(rdev, wdev); break; default: break; } if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0, rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int ret; /* to avoid failing a new interface creation due to pending removal */ cfg80211_destroy_ifaces(rdev); wiphy_lock(&rdev->wiphy); ret = _nl80211_new_interface(skb, info); wiphy_unlock(&rdev->wiphy); return ret; } static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->del_virtual_intf) return -EOPNOTSUPP; /* * We hold RTNL, so this is safe, without RTNL opencount cannot * reach 0, and thus the rdev cannot be deleted. * * We need to do it for the dev_close(), since that will call * the netdev notifiers, and we need to acquire the mutex there * but don't know if we get there from here or from some other * place (e.g. "ip link set ... down"). */ mutex_unlock(&rdev->wiphy.mtx); /* * If we remove a wireless device without a netdev then clear * user_ptr[1] so that nl80211_post_doit won't dereference it * to check if it needs to do dev_put(). Otherwise it crashes * since the wdev has been freed, unlike with a netdev where * we need the dev_put() for the netdev to really be freed. 
*/ if (!wdev->netdev) info->user_ptr[1] = NULL; else dev_close(wdev->netdev); mutex_lock(&rdev->wiphy.mtx); return cfg80211_remove_virtual_intf(rdev, wdev); } static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 noack_map; if (!info->attrs[NL80211_ATTR_NOACK_MAP]) return -EINVAL; if (!rdev->ops->set_noack_map) return -EOPNOTSUPP; noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]); return rdev_set_noack_map(rdev, dev, noack_map); } static int nl80211_validate_key_link_id(struct genl_info *info, struct wireless_dev *wdev, int link_id, bool pairwise) { if (pairwise) { if (link_id != -1) { GENL_SET_ERR_MSG(info, "link ID not allowed for pairwise key"); return -EINVAL; } return 0; } if (wdev->valid_links) { if (link_id == -1) { GENL_SET_ERR_MSG(info, "link ID must for MLO group key"); return -EINVAL; } if (!(wdev->valid_links & BIT(link_id))) { GENL_SET_ERR_MSG(info, "invalid link ID for MLO group key"); return -EINVAL; } } else if (link_id != -1) { GENL_SET_ERR_MSG(info, "link ID not allowed for non-MLO group key"); return -EINVAL; } return 0; } struct get_key_cookie { struct sk_buff *msg; int error; int idx; }; static void get_key_callback(void *c, struct key_params *params) { struct nlattr *key; struct get_key_cookie *cookie = c; if ((params->key && nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, params->key_len, params->key)) || (params->seq && nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER, params->cipher))) goto nla_put_failure; key = nla_nest_start_noflag(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; if ((params->key && nla_put(cookie->msg, NL80211_KEY_DATA, params->key_len, params->key)) || (params->seq && nla_put(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && nla_put_u32(cookie->msg, NL80211_KEY_CIPHER, params->cipher))) goto nla_put_failure; if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx)) goto nla_put_failure; nla_nest_end(cookie->msg, key); return; nla_put_failure: cookie->error = 1; } static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; u8 key_idx = 0; const u8 *mac_addr = NULL; bool pairwise; struct get_key_cookie cookie = { .error = 0, }; void *hdr; struct sk_buff *msg; bool bigtk_support = false; int link_id = nl80211_link_id_or_invalid(info->attrs); struct wireless_dev *wdev = dev->ieee80211_ptr; if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION)) bigtk_support = true; if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) bigtk_support = true; if (info->attrs[NL80211_ATTR_KEY_IDX]) { key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (key_idx >= 6 && key_idx <= 7 && !bigtk_support) { GENL_SET_ERR_MSG(info, "BIGTK not supported"); return -EINVAL; } } if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); pairwise = !!mac_addr; if (info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (kt != NL80211_KEYTYPE_GROUP && kt != NL80211_KEYTYPE_PAIRWISE) return -EINVAL; pairwise = kt == NL80211_KEYTYPE_PAIRWISE; } if 
(!rdev->ops->get_key) return -EOPNOTSUPP; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -ENOENT; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_NEW_KEY); if (!hdr) goto nla_put_failure; cookie.msg = msg; cookie.idx = key_idx; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx)) goto nla_put_failure; if (mac_addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) goto nla_put_failure; err = nl80211_validate_key_link_id(info, wdev, link_id, pairwise); if (err) goto free_msg; err = rdev_get_key(rdev, dev, link_id, key_idx, pairwise, mac_addr, &cookie, get_key_callback); if (err) goto free_msg; if (cookie.error) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct key_parse key; int err; struct net_device *dev = info->user_ptr[1]; int link_id = nl80211_link_id_or_invalid(info->attrs); struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx < 0) return -EINVAL; /* Only support setting default key and * Extended Key ID action NL80211_KEY_SET_TX. */ if (!key.def && !key.defmgmt && !key.defbeacon && !(key.p.mode == NL80211_KEY_SET_TX)) return -EINVAL; wdev_lock(wdev); if (key.def) { if (!rdev->ops->set_default_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(wdev); if (err) goto out; err = nl80211_validate_key_link_id(info, wdev, link_id, false); if (err) goto out; err = rdev_set_default_key(rdev, dev, link_id, key.idx, key.def_uni, key.def_multi); if (err) goto out; #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = key.idx; #endif } else if (key.defmgmt) { if (key.def_uni || !key.def_multi) { err = -EINVAL; goto out; } if (!rdev->ops->set_default_mgmt_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(wdev); if (err) goto out; err = nl80211_validate_key_link_id(info, wdev, link_id, false); if (err) goto out; err = rdev_set_default_mgmt_key(rdev, dev, link_id, key.idx); if (err) goto out; #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_mgmt_key = key.idx; #endif } else if (key.defbeacon) { if (key.def_uni || !key.def_multi) { err = -EINVAL; goto out; } if (!rdev->ops->set_default_beacon_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(wdev); if (err) goto out; err = nl80211_validate_key_link_id(info, wdev, link_id, false); if (err) goto out; err = rdev_set_default_beacon_key(rdev, dev, link_id, key.idx); if (err) goto out; } else if (key.p.mode == NL80211_KEY_SET_TX && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID)) { u8 *mac_addr = NULL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!mac_addr || key.idx < 0 || key.idx > 1) { err = -EINVAL; goto out; } err = nl80211_validate_key_link_id(info, wdev, link_id, true); if (err) goto out; err = rdev_add_key(rdev, dev, link_id, key.idx, NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); } else { err = -EINVAL; } out: wdev_unlock(wdev); return err; } static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct key_parse key; const 
u8 *mac_addr = NULL; int link_id = nl80211_link_id_or_invalid(info->attrs); struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) return err; if (!key.p.key) { GENL_SET_ERR_MSG(info, "no key"); return -EINVAL; } if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (key.type == -1) { if (mac_addr) key.type = NL80211_KEYTYPE_PAIRWISE; else key.type = NL80211_KEYTYPE_GROUP; } /* for now */ if (key.type != NL80211_KEYTYPE_PAIRWISE && key.type != NL80211_KEYTYPE_GROUP) { GENL_SET_ERR_MSG(info, "key type not pairwise or group"); return -EINVAL; } if (key.type == NL80211_KEYTYPE_GROUP && info->attrs[NL80211_ATTR_VLAN_ID]) key.p.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]); if (!rdev->ops->add_key) return -EOPNOTSUPP; if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr)) { GENL_SET_ERR_MSG(info, "key setting validation failed"); return -EINVAL; } wdev_lock(wdev); err = nl80211_key_allowed(wdev); if (err) GENL_SET_ERR_MSG(info, "key not allowed"); if (!err) err = nl80211_validate_key_link_id(info, wdev, link_id, key.type == NL80211_KEYTYPE_PAIRWISE); if (!err) { err = rdev_add_key(rdev, dev, link_id, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr, &key.p); if (err) GENL_SET_ERR_MSG(info, "key addition failed"); } wdev_unlock(wdev); return err; } static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; u8 *mac_addr = NULL; struct key_parse key; int link_id = nl80211_link_id_or_invalid(info->attrs); struct wireless_dev *wdev = dev->ieee80211_ptr; err = nl80211_parse_key(info, &key); if (err) return err; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (key.type == -1) { if (mac_addr) key.type = NL80211_KEYTYPE_PAIRWISE; else key.type = NL80211_KEYTYPE_GROUP; } /* for now */ if (key.type != NL80211_KEYTYPE_PAIRWISE && key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; if (!cfg80211_valid_key_idx(rdev, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE)) return -EINVAL; if (!rdev->ops->del_key) return -EOPNOTSUPP; wdev_lock(wdev); err = nl80211_key_allowed(wdev); if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) err = -ENOENT; if (!err) err = nl80211_validate_key_link_id(info, wdev, link_id, key.type == NL80211_KEYTYPE_PAIRWISE); if (!err) err = rdev_del_key(rdev, dev, link_id, key.idx, key.type == NL80211_KEYTYPE_PAIRWISE, mac_addr); #ifdef CONFIG_CFG80211_WEXT if (!err) { if (key.idx == wdev->wext.default_key) wdev->wext.default_key = -1; else if (key.idx == wdev->wext.default_mgmt_key) wdev->wext.default_mgmt_key = -1; } #endif wdev_unlock(wdev); return err; } /* This function returns an error or the number of nested attributes */ static int validate_acl_mac_addrs(struct nlattr *nl_attr) { struct nlattr *attr; int n_entries = 0, tmp; nla_for_each_nested(attr, nl_attr, tmp) { if (nla_len(attr) != ETH_ALEN) return -EINVAL; n_entries++; } return n_entries; } /* * This function parses ACL information and allocates memory for ACL data. * On successful return, the calling function is responsible to free the * ACL buffer returned by this function. 
*/ static struct cfg80211_acl_data *parse_acl_data(struct wiphy *wiphy, struct genl_info *info) { enum nl80211_acl_policy acl_policy; struct nlattr *attr; struct cfg80211_acl_data *acl; int i = 0, n_entries, tmp; if (!wiphy->max_acl_mac_addrs) return ERR_PTR(-EOPNOTSUPP); if (!info->attrs[NL80211_ATTR_ACL_POLICY]) return ERR_PTR(-EINVAL); acl_policy = nla_get_u32(info->attrs[NL80211_ATTR_ACL_POLICY]); if (acl_policy != NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED && acl_policy != NL80211_ACL_POLICY_DENY_UNLESS_LISTED) return ERR_PTR(-EINVAL); if (!info->attrs[NL80211_ATTR_MAC_ADDRS]) return ERR_PTR(-EINVAL); n_entries = validate_acl_mac_addrs(info->attrs[NL80211_ATTR_MAC_ADDRS]); if (n_entries < 0) return ERR_PTR(n_entries); if (n_entries > wiphy->max_acl_mac_addrs) return ERR_PTR(-ENOTSUPP); acl = kzalloc(struct_size(acl, mac_addrs, n_entries), GFP_KERNEL); if (!acl) return ERR_PTR(-ENOMEM); acl->n_acl_entries = n_entries; nla_for_each_nested(attr, info->attrs[NL80211_ATTR_MAC_ADDRS], tmp) { memcpy(acl->mac_addrs[i].addr, nla_data(attr), ETH_ALEN); i++; } acl->acl_policy = acl_policy; return acl; } static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_acl_data *acl; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!dev->ieee80211_ptr->links[0].ap.beacon_interval) return -EINVAL; acl = parse_acl_data(&rdev->wiphy, info); if (IS_ERR(acl)) return PTR_ERR(acl); err = rdev_set_mac_acl(rdev, dev, acl); kfree(acl); return err; } static u32 rateset_to_mask(struct ieee80211_supported_band *sband, u8 *rates, u8 rates_len) { u8 i; u32 mask = 0; for (i = 0; i < rates_len; i++) { int rate = (rates[i] & 0x7f) * 5; int ridx; for (ridx = 0; ridx < sband->n_bitrates; ridx++) { struct ieee80211_rate *srate = &sband->bitrates[ridx]; if (rate == srate->bitrate) { mask |= 1 << ridx; break; } } if (ridx == sband->n_bitrates) return 0; /* rate not found */ } return mask; } static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband, u8 *rates, u8 rates_len, u8 mcs[IEEE80211_HT_MCS_MASK_LEN]) { u8 i; memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN); for (i = 0; i < rates_len; i++) { int ridx, rbit; ridx = rates[i] / 8; rbit = BIT(rates[i] % 8); /* check validity */ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN)) return false; /* check availability */ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN); if (sband->ht_cap.mcs.rx_mask[ridx] & rbit) mcs[ridx] |= rbit; else return false; } return true; } static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map) { u16 mcs_mask = 0; switch (vht_mcs_map) { case IEEE80211_VHT_MCS_NOT_SUPPORTED: break; case IEEE80211_VHT_MCS_SUPPORT_0_7: mcs_mask = 0x00FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: mcs_mask = 0x01FF; break; case IEEE80211_VHT_MCS_SUPPORT_0_9: mcs_mask = 0x03FF; break; default: break; } return mcs_mask; } static void vht_build_mcs_mask(u16 vht_mcs_map, u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) { u8 nss; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03); vht_mcs_map >>= 2; } } static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband, struct nl80211_txrate_vht *txrate, u16 mcs[NL80211_VHT_NSS_MAX]) { u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {}; u8 i; if (!sband->vht_cap.vht_supported) 
return false; memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX); /* Build vht_mcs_mask from VHT capabilities */ vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i]) mcs[i] = txrate->mcs[i]; else return false; } return true; } static u16 he_mcs_map_to_mcs_mask(u8 he_mcs_map) { switch (he_mcs_map) { case IEEE80211_HE_MCS_NOT_SUPPORTED: return 0; case IEEE80211_HE_MCS_SUPPORT_0_7: return 0x00FF; case IEEE80211_HE_MCS_SUPPORT_0_9: return 0x03FF; case IEEE80211_HE_MCS_SUPPORT_0_11: return 0xFFF; default: break; } return 0; } static void he_build_mcs_mask(u16 he_mcs_map, u16 he_mcs_mask[NL80211_HE_NSS_MAX]) { u8 nss; for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { he_mcs_mask[nss] = he_mcs_map_to_mcs_mask(he_mcs_map & 0x03); he_mcs_map >>= 2; } } static u16 he_get_txmcsmap(struct genl_info *info, unsigned int link_id, const struct ieee80211_sta_he_cap *he_cap) { struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_chan_def *chandef; __le16 tx_mcs; chandef = wdev_chandef(wdev, link_id); if (!chandef) { /* * This is probably broken, but we never maintained * a chandef in these cases, so it always was. */ return le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); } switch (chandef->width) { case NL80211_CHAN_WIDTH_80P80: tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80p80; break; case NL80211_CHAN_WIDTH_160: tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_160; break; default: tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80; break; } return le16_to_cpu(tx_mcs); } static bool he_set_mcs_mask(struct genl_info *info, struct wireless_dev *wdev, struct ieee80211_supported_band *sband, struct nl80211_txrate_he *txrate, u16 mcs[NL80211_HE_NSS_MAX], unsigned int link_id) { const struct ieee80211_sta_he_cap *he_cap; u16 tx_mcs_mask[NL80211_HE_NSS_MAX] = {}; u16 tx_mcs_map = 0; u8 i; he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); if (!he_cap) return false; memset(mcs, 0, sizeof(u16) * NL80211_HE_NSS_MAX); tx_mcs_map = he_get_txmcsmap(info, link_id, he_cap); /* Build he_mcs_mask from HE capabilities */ he_build_mcs_mask(tx_mcs_map, tx_mcs_mask); for (i = 0; i < NL80211_HE_NSS_MAX; i++) { if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i]) mcs[i] = txrate->mcs[i]; else return false; } return true; } static int nl80211_parse_tx_bitrate_mask(struct genl_info *info, struct nlattr *attrs[], enum nl80211_attrs attr, struct cfg80211_bitrate_mask *mask, struct net_device *dev, bool default_all_enabled, unsigned int link_id) { struct nlattr *tb[NL80211_TXRATE_MAX + 1]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = dev->ieee80211_ptr; int rem, i; struct nlattr *tx_rates; struct ieee80211_supported_band *sband; u16 vht_tx_mcs_map, he_tx_mcs_map; memset(mask, 0, sizeof(*mask)); /* Default to all rates enabled */ for (i = 0; i < NUM_NL80211_BANDS; i++) { const struct ieee80211_sta_he_cap *he_cap; if (!default_all_enabled) break; sband = rdev->wiphy.bands[i]; if (!sband) continue; mask->control[i].legacy = (1 << sband->n_bitrates) - 1; memcpy(mask->control[i].ht_mcs, sband->ht_cap.mcs.rx_mask, sizeof(mask->control[i].ht_mcs)); if (sband->vht_cap.vht_supported) { vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs); } he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype); if (!he_cap) continue; he_tx_mcs_map = he_get_txmcsmap(info, link_id, he_cap); 
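/*
 * Descriptive note (added): both the VHT and HE TX MCS maps pack two bits
 * per spatial stream (VHT: 0 = MCS 0-7, 1 = 0-8, 2 = 0-9; HE: 0 = MCS 0-7,
 * 1 = 0-9, 2 = 0-11; 3 = stream not supported), as decoded by
 * vht_mcs_map_to_mcs_mask() and he_mcs_map_to_mcs_mask() above. The
 * *_build_mcs_mask() helpers expand each two-bit field into a per-NSS
 * bitmask of enabled MCS indices, which is what gets stored in the
 * default-enabled rate mask here.
 */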
he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs); mask->control[i].he_gi = 0xFF; mask->control[i].he_ltf = 0xFF; } /* if no rates are given set it back to the defaults */ if (!attrs[attr]) goto out; /* The nested attribute uses enum nl80211_band as the index. This maps * directly to the enum nl80211_band values used in cfg80211. */ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8); nla_for_each_nested(tx_rates, attrs[attr], rem) { enum nl80211_band band = nla_type(tx_rates); int err; if (band < 0 || band >= NUM_NL80211_BANDS) return -EINVAL; sband = rdev->wiphy.bands[band]; if (sband == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, NL80211_TXRATE_MAX, tx_rates, nl80211_txattr_policy, info->extack); if (err) return err; if (tb[NL80211_TXRATE_LEGACY]) { mask->control[band].legacy = rateset_to_mask( sband, nla_data(tb[NL80211_TXRATE_LEGACY]), nla_len(tb[NL80211_TXRATE_LEGACY])); if ((mask->control[band].legacy == 0) && nla_len(tb[NL80211_TXRATE_LEGACY])) return -EINVAL; } if (tb[NL80211_TXRATE_HT]) { if (!ht_rateset_to_mask( sband, nla_data(tb[NL80211_TXRATE_HT]), nla_len(tb[NL80211_TXRATE_HT]), mask->control[band].ht_mcs)) return -EINVAL; } if (tb[NL80211_TXRATE_VHT]) { if (!vht_set_mcs_mask( sband, nla_data(tb[NL80211_TXRATE_VHT]), mask->control[band].vht_mcs)) return -EINVAL; } if (tb[NL80211_TXRATE_GI]) { mask->control[band].gi = nla_get_u8(tb[NL80211_TXRATE_GI]); if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI) return -EINVAL; } if (tb[NL80211_TXRATE_HE] && !he_set_mcs_mask(info, wdev, sband, nla_data(tb[NL80211_TXRATE_HE]), mask->control[band].he_mcs, link_id)) return -EINVAL; if (tb[NL80211_TXRATE_HE_GI]) mask->control[band].he_gi = nla_get_u8(tb[NL80211_TXRATE_HE_GI]); if (tb[NL80211_TXRATE_HE_LTF]) mask->control[band].he_ltf = nla_get_u8(tb[NL80211_TXRATE_HE_LTF]); if (mask->control[band].legacy == 0) { /* don't allow empty legacy rates if HT, VHT or HE * are not even supported. 
*/ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported || rdev->wiphy.bands[band]->vht_cap.vht_supported || ieee80211_get_he_iftype_cap(sband, wdev->iftype))) return -EINVAL; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) if (mask->control[band].ht_mcs[i]) goto out; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) if (mask->control[band].vht_mcs[i]) goto out; for (i = 0; i < NL80211_HE_NSS_MAX; i++) if (mask->control[band].he_mcs[i]) goto out; /* legacy and mcs rates may not be both empty */ return -EINVAL; } } out: return 0; } static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev, enum nl80211_band band, struct cfg80211_bitrate_mask *beacon_rate) { u32 count_ht, count_vht, count_he, i; u32 rate = beacon_rate->control[band].legacy; /* Allow only one rate */ if (hweight32(rate) > 1) return -EINVAL; count_ht = 0; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) { return -EINVAL; } else if (beacon_rate->control[band].ht_mcs[i]) { count_ht++; if (count_ht > 1) return -EINVAL; } if (count_ht && rate) return -EINVAL; } count_vht = 0; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) { return -EINVAL; } else if (beacon_rate->control[band].vht_mcs[i]) { count_vht++; if (count_vht > 1) return -EINVAL; } if (count_vht && rate) return -EINVAL; } count_he = 0; for (i = 0; i < NL80211_HE_NSS_MAX; i++) { if (hweight16(beacon_rate->control[band].he_mcs[i]) > 1) { return -EINVAL; } else if (beacon_rate->control[band].he_mcs[i]) { count_he++; if (count_he > 1) return -EINVAL; } if (count_he && rate) return -EINVAL; } if ((count_ht && count_vht && count_he) || (!rate && !count_ht && !count_vht && !count_he)) return -EINVAL; if (rate && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY)) return -EINVAL; if (count_ht && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT)) return -EINVAL; if (count_vht && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT)) return -EINVAL; if (count_he && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE)) return -EINVAL; return 0; } static int nl80211_parse_mbssid_config(struct wiphy *wiphy, struct net_device *dev, struct nlattr *attrs, struct cfg80211_mbssid_config *config, u8 num_elems) { struct nlattr *tb[NL80211_MBSSID_CONFIG_ATTR_MAX + 1]; if (!wiphy->mbssid_max_interfaces) return -EOPNOTSUPP; if (nla_parse_nested(tb, NL80211_MBSSID_CONFIG_ATTR_MAX, attrs, NULL, NULL) || !tb[NL80211_MBSSID_CONFIG_ATTR_INDEX]) return -EINVAL; config->ema = nla_get_flag(tb[NL80211_MBSSID_CONFIG_ATTR_EMA]); if (config->ema) { if (!wiphy->ema_max_profile_periodicity) return -EOPNOTSUPP; if (num_elems > wiphy->ema_max_profile_periodicity) return -EINVAL; } config->index = nla_get_u8(tb[NL80211_MBSSID_CONFIG_ATTR_INDEX]); if (config->index >= wiphy->mbssid_max_interfaces || (!config->index && !num_elems)) return -EINVAL; if (tb[NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX]) { u32 tx_ifindex = nla_get_u32(tb[NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX]); if ((!config->index && tx_ifindex != dev->ifindex) || (config->index && tx_ifindex == dev->ifindex)) return -EINVAL; if (tx_ifindex != dev->ifindex) { struct net_device *tx_netdev = dev_get_by_index(wiphy_net(wiphy), tx_ifindex); if (!tx_netdev || !tx_netdev->ieee80211_ptr || tx_netdev->ieee80211_ptr->wiphy != wiphy || tx_netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { dev_put(tx_netdev); return -EINVAL; } config->tx_wdev = 
tx_netdev->ieee80211_ptr; } else { config->tx_wdev = dev->ieee80211_ptr; } } else if (!config->index) { config->tx_wdev = dev->ieee80211_ptr; } else { return -EINVAL; } return 0; } static struct cfg80211_mbssid_elems * nl80211_parse_mbssid_elems(struct wiphy *wiphy, struct nlattr *attrs) { struct nlattr *nl_elems; struct cfg80211_mbssid_elems *elems; int rem_elems; u8 i = 0, num_elems = 0; if (!wiphy->mbssid_max_interfaces) return ERR_PTR(-EINVAL); nla_for_each_nested(nl_elems, attrs, rem_elems) { if (num_elems >= 255) return ERR_PTR(-EINVAL); num_elems++; } elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); if (!elems) return ERR_PTR(-ENOMEM); elems->cnt = num_elems; nla_for_each_nested(nl_elems, attrs, rem_elems) { elems->elem[i].data = nla_data(nl_elems); elems->elem[i].len = nla_len(nl_elems); i++; } return elems; } static struct cfg80211_rnr_elems * nl80211_parse_rnr_elems(struct wiphy *wiphy, struct nlattr *attrs, struct netlink_ext_ack *extack) { struct nlattr *nl_elems; struct cfg80211_rnr_elems *elems; int rem_elems; u8 i = 0, num_elems = 0; nla_for_each_nested(nl_elems, attrs, rem_elems) { int ret; ret = validate_ie_attr(nl_elems, extack); if (ret) return ERR_PTR(ret); num_elems++; } elems = kzalloc(struct_size(elems, elem, num_elems), GFP_KERNEL); if (!elems) return ERR_PTR(-ENOMEM); elems->cnt = num_elems; nla_for_each_nested(nl_elems, attrs, rem_elems) { elems->elem[i].data = nla_data(nl_elems); elems->elem[i].len = nla_len(nl_elems); i++; } return elems; } static int nl80211_parse_he_bss_color(struct nlattr *attrs, struct cfg80211_he_bss_color *he_bss_color) { struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1]; int err; err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs, he_bss_color_policy, NULL); if (err) return err; if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]) return -EINVAL; he_bss_color->color = nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]); he_bss_color->enabled = !nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); he_bss_color->partial = nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]); return 0; } static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, struct nlattr *attrs[], struct cfg80211_beacon_data *bcn, struct netlink_ext_ack *extack) { bool haveinfo = false; int err; memset(bcn, 0, sizeof(*bcn)); bcn->link_id = nl80211_link_id(attrs); if (attrs[NL80211_ATTR_BEACON_HEAD]) { bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]); bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]); if (!bcn->head_len) return -EINVAL; haveinfo = true; } if (attrs[NL80211_ATTR_BEACON_TAIL]) { bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]); bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]); haveinfo = true; } if (!haveinfo) return -EINVAL; if (attrs[NL80211_ATTR_IE]) { bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]); bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]); } if (attrs[NL80211_ATTR_IE_PROBE_RESP]) { bcn->proberesp_ies = nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]); bcn->proberesp_ies_len = nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]); } if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) { bcn->assocresp_ies = nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]); bcn->assocresp_ies_len = nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]); } if (attrs[NL80211_ATTR_PROBE_RESP]) { bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]); bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]); } if (attrs[NL80211_ATTR_FTM_RESPONDER]) { struct nlattr *tb[NL80211_FTM_RESP_ATTR_MAX + 1]; err = nla_parse_nested_deprecated(tb, 
NL80211_FTM_RESP_ATTR_MAX, attrs[NL80211_ATTR_FTM_RESPONDER], NULL, NULL); if (err) return err; if (tb[NL80211_FTM_RESP_ATTR_ENABLED] && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER)) bcn->ftm_responder = 1; else return -EOPNOTSUPP; if (tb[NL80211_FTM_RESP_ATTR_LCI]) { bcn->lci = nla_data(tb[NL80211_FTM_RESP_ATTR_LCI]); bcn->lci_len = nla_len(tb[NL80211_FTM_RESP_ATTR_LCI]); } if (tb[NL80211_FTM_RESP_ATTR_CIVICLOC]) { bcn->civicloc = nla_data(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); bcn->civicloc_len = nla_len(tb[NL80211_FTM_RESP_ATTR_CIVICLOC]); } } else { bcn->ftm_responder = -1; } if (attrs[NL80211_ATTR_HE_BSS_COLOR]) { err = nl80211_parse_he_bss_color(attrs[NL80211_ATTR_HE_BSS_COLOR], &bcn->he_bss_color); if (err) return err; bcn->he_bss_color_valid = true; } if (attrs[NL80211_ATTR_MBSSID_ELEMS]) { struct cfg80211_mbssid_elems *mbssid = nl80211_parse_mbssid_elems(&rdev->wiphy, attrs[NL80211_ATTR_MBSSID_ELEMS]); if (IS_ERR(mbssid)) return PTR_ERR(mbssid); bcn->mbssid_ies = mbssid; if (bcn->mbssid_ies && attrs[NL80211_ATTR_EMA_RNR_ELEMS]) { struct cfg80211_rnr_elems *rnr = nl80211_parse_rnr_elems(&rdev->wiphy, attrs[NL80211_ATTR_EMA_RNR_ELEMS], extack); if (IS_ERR(rnr)) return PTR_ERR(rnr); if (rnr && rnr->cnt < bcn->mbssid_ies->cnt) return -EINVAL; bcn->rnr_ies = rnr; } } return 0; } static int nl80211_parse_he_obss_pd(struct nlattr *attrs, struct ieee80211_he_obss_pd *he_obss_pd) { struct nlattr *tb[NL80211_HE_OBSS_PD_ATTR_MAX + 1]; int err; err = nla_parse_nested(tb, NL80211_HE_OBSS_PD_ATTR_MAX, attrs, he_obss_pd_policy, NULL); if (err) return err; if (!tb[NL80211_HE_OBSS_PD_ATTR_SR_CTRL]) return -EINVAL; he_obss_pd->sr_ctrl = nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_SR_CTRL]); if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]) he_obss_pd->min_offset = nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]); if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]) he_obss_pd->max_offset = nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]); if (tb[NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET]) he_obss_pd->non_srg_max_offset = nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET]); if (he_obss_pd->min_offset > he_obss_pd->max_offset) return -EINVAL; if (tb[NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP]) memcpy(he_obss_pd->bss_color_bitmap, nla_data(tb[NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP]), sizeof(he_obss_pd->bss_color_bitmap)); if (tb[NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP]) memcpy(he_obss_pd->partial_bssid_bitmap, nla_data(tb[NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP]), sizeof(he_obss_pd->partial_bssid_bitmap)); he_obss_pd->enable = true; return 0; } static int nl80211_parse_fils_discovery(struct cfg80211_registered_device *rdev, struct nlattr *attrs, struct cfg80211_ap_settings *params) { struct nlattr *tb[NL80211_FILS_DISCOVERY_ATTR_MAX + 1]; int ret; struct cfg80211_fils_discovery *fd = &params->fils_discovery; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY)) return -EINVAL; ret = nla_parse_nested(tb, NL80211_FILS_DISCOVERY_ATTR_MAX, attrs, NULL, NULL); if (ret) return ret; if (!tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN] || !tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX] || !tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]) return -EINVAL; fd->tmpl_len = nla_len(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]); fd->tmpl = nla_data(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]); fd->min_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN]); fd->max_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX]); return 0; } static int 
nl80211_parse_unsol_bcast_probe_resp(struct cfg80211_registered_device *rdev, struct nlattr *attrs, struct cfg80211_ap_settings *params) { struct nlattr *tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1]; int ret; struct cfg80211_unsol_bcast_probe_resp *presp = &params->unsol_bcast_probe_resp; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP)) return -EINVAL; ret = nla_parse_nested(tb, NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX, attrs, NULL, NULL); if (ret) return ret; if (!tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] || !tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]) return -EINVAL; presp->tmpl = nla_data(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]); presp->tmpl_len = nla_len(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]); presp->interval = nla_get_u32(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT]); return 0; } static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, const struct element *rates) { int i; if (!rates) return; for (i = 0; i < rates->datalen; i++) { if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY) params->ht_required = true; if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY) params->vht_required = true; if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_HE_PHY) params->he_required = true; if (rates->data[i] == BSS_MEMBERSHIP_SELECTOR_SAE_H2E) params->sae_h2e_required = true; } } /* * Since the nl80211 API didn't include, from the beginning, attributes about * HT/VHT requirements/capabilities, we parse them out of the IEs for the * benefit of drivers that rebuild IEs in the firmware. */ static int nl80211_calculate_ap_params(struct cfg80211_ap_settings *params) { const struct cfg80211_beacon_data *bcn = &params->beacon; size_t ies_len = bcn->tail_len; const u8 *ies = bcn->tail; const struct element *rates; const struct element *cap; rates = cfg80211_find_elem(WLAN_EID_SUPP_RATES, ies, ies_len); nl80211_check_ap_rate_selectors(params, rates); rates = cfg80211_find_elem(WLAN_EID_EXT_SUPP_RATES, ies, ies_len); nl80211_check_ap_rate_selectors(params, rates); cap = cfg80211_find_elem(WLAN_EID_HT_CAPABILITY, ies, ies_len); if (cap && cap->datalen >= sizeof(*params->ht_cap)) params->ht_cap = (void *)cap->data; cap = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY, ies, ies_len); if (cap && cap->datalen >= sizeof(*params->vht_cap)) params->vht_cap = (void *)cap->data; cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len); if (cap && cap->datalen >= sizeof(*params->he_cap) + 1) params->he_cap = (void *)(cap->data + 1); cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ies, ies_len); if (cap && cap->datalen >= sizeof(*params->he_oper) + 1) params->he_oper = (void *)(cap->data + 1); cap = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_CAPABILITY, ies, ies_len); if (cap) { if (!cap->datalen) return -EINVAL; params->eht_cap = (void *)(cap->data + 1); if (!ieee80211_eht_capa_size_ok((const u8 *)params->he_cap, (const u8 *)params->eht_cap, cap->datalen - 1, true)) return -EINVAL; } cap = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION, ies, ies_len); if (cap) { if (!cap->datalen) return -EINVAL; params->eht_oper = (void *)(cap->data + 1); if (!ieee80211_eht_oper_size_ok((const u8 *)params->eht_oper, cap->datalen - 1)) return -EINVAL; } return 0; } static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev, struct cfg80211_ap_settings *params) { struct wireless_dev *wdev; list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) 
continue; if (!wdev->u.ap.preset_chandef.chan) continue; params->chandef = wdev->u.ap.preset_chandef; return true; } return false; } static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev, enum nl80211_auth_type auth_type, enum nl80211_commands cmd) { if (auth_type > NL80211_AUTHTYPE_MAX) return false; switch (cmd) { case NL80211_CMD_AUTHENTICATE: if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) && auth_type == NL80211_AUTHTYPE_SAE) return false; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_STA) && (auth_type == NL80211_AUTHTYPE_FILS_SK || auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK)) return false; return true; case NL80211_CMD_CONNECT: if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD) && auth_type == NL80211_AUTHTYPE_SAE) return false; /* FILS with SK PFS or PK not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK) return false; if (!wiphy_ext_feature_isset( &rdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && auth_type == NL80211_AUTHTYPE_FILS_SK) return false; return true; case NL80211_CMD_START_AP: if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD_AP) && auth_type == NL80211_AUTHTYPE_SAE) return false; /* FILS not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK || auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK) return false; return true; default: return false; } } static void nl80211_send_ap_started(struct wireless_dev *wdev, unsigned int link_id) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_START_AP); if (!hdr) goto out; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || (wdev->u.ap.ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->u.ap.ssid_len, wdev->u.ap.ssid)) || (wdev->valid_links && nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))) goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; out: nlmsg_free(msg); } static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_ap_settings *params; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!rdev->ops->start_ap) return -EOPNOTSUPP; if (wdev->links[link_id].ap.beacon_interval) return -EALREADY; /* these are required for START_AP */ if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || !info->attrs[NL80211_ATTR_BEACON_HEAD]) return -EINVAL; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; err = nl80211_parse_beacon(rdev, info->attrs, &params->beacon, info->extack); if (err) goto out; params->beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); params->dtim_period = 
nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype, params->beacon_interval); if (err) goto out; /* * In theory, some of these attributes should be required here * but since they were not used when the command was originally * added, keep them optional for old user space programs to let * them continue to work with drivers that do not need the * additional information -- drivers must check! */ if (info->attrs[NL80211_ATTR_SSID]) { params->ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); params->ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (params->ssid_len == 0) { err = -EINVAL; goto out; } if (wdev->u.ap.ssid_len && (wdev->u.ap.ssid_len != params->ssid_len || memcmp(wdev->u.ap.ssid, params->ssid, params->ssid_len))) { /* require identical SSID for MLO */ err = -EINVAL; goto out; } } else if (wdev->valid_links) { /* require SSID for MLO */ err = -EINVAL; goto out; } if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) params->hidden_ssid = nla_get_u32( info->attrs[NL80211_ATTR_HIDDEN_SSID]); params->privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { params->auth_type = nla_get_u32( info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, params->auth_type, NL80211_CMD_START_AP)) { err = -EINVAL; goto out; } } else params->auth_type = NL80211_AUTHTYPE_AUTOMATIC; err = nl80211_crypto_settings(rdev, info, &params->crypto, NL80211_MAX_NR_CIPHER_SUITES); if (err) goto out; if (info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]) { if (!(rdev->wiphy.features & NL80211_FEATURE_INACTIVITY_TIMER)) { err = -EOPNOTSUPP; goto out; } params->inactivity_timeout = nla_get_u16( info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]); } if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) { if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { err = -EINVAL; goto out; } params->p2p_ctwindow = nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); if (params->p2p_ctwindow != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) { err = -EINVAL; goto out; } } if (info->attrs[NL80211_ATTR_P2P_OPPPS]) { u8 tmp; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { err = -EINVAL; goto out; } tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); params->p2p_opp_ps = tmp; if (params->p2p_opp_ps != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) { err = -EINVAL; goto out; } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { err = nl80211_parse_chandef(rdev, info, &params->chandef); if (err) goto out; } else if (wdev->valid_links) { /* with MLD need to specify the channel configuration */ err = -EINVAL; goto out; } else if (wdev->u.ap.preset_chandef.chan) { params->chandef = wdev->u.ap.preset_chandef; } else if (!nl80211_get_ap_channel(rdev, params)) { err = -EINVAL; goto out; } if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) { err = nl80211_parse_punct_bitmap(rdev, info, &params->chandef, &params->punct_bitmap); if (err) goto out; } if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef, wdev->iftype)) { err = -EINVAL; goto out; } wdev_lock(wdev); if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, &params->beacon_rate, dev, false, link_id); if (err) goto out_unlock; err = validate_beacon_tx_rate(rdev, params->chandef.chan->band, &params->beacon_rate); if (err) goto out_unlock; } if (info->attrs[NL80211_ATTR_SMPS_MODE]) { params->smps_mode = nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]); switch (params->smps_mode) { 
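/*
 * Descriptive note (added): SMPS_OFF is always accepted; static or dynamic
 * SMPS may only be requested when the driver advertises the matching
 * NL80211_FEATURE_STATIC_SMPS / NL80211_FEATURE_DYNAMIC_SMPS flag, and any
 * other value is rejected with -EINVAL below.
 */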
case NL80211_SMPS_OFF: break; case NL80211_SMPS_STATIC: if (!(rdev->wiphy.features & NL80211_FEATURE_STATIC_SMPS)) { err = -EINVAL; goto out_unlock; } break; case NL80211_SMPS_DYNAMIC: if (!(rdev->wiphy.features & NL80211_FEATURE_DYNAMIC_SMPS)) { err = -EINVAL; goto out_unlock; } break; default: err = -EINVAL; goto out_unlock; } } else { params->smps_mode = NL80211_SMPS_OFF; } params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { err = -EOPNOTSUPP; goto out_unlock; } if (info->attrs[NL80211_ATTR_ACL_POLICY]) { params->acl = parse_acl_data(&rdev->wiphy, info); if (IS_ERR(params->acl)) { err = PTR_ERR(params->acl); params->acl = NULL; goto out_unlock; } } params->twt_responder = nla_get_flag(info->attrs[NL80211_ATTR_TWT_RESPONDER]); if (info->attrs[NL80211_ATTR_HE_OBSS_PD]) { err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], &params->he_obss_pd); if (err) goto out_unlock; } if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) { err = nl80211_parse_fils_discovery(rdev, info->attrs[NL80211_ATTR_FILS_DISCOVERY], params); if (err) goto out_unlock; } if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) { err = nl80211_parse_unsol_bcast_probe_resp( rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP], params); if (err) goto out_unlock; } if (info->attrs[NL80211_ATTR_MBSSID_CONFIG]) { err = nl80211_parse_mbssid_config(&rdev->wiphy, dev, info->attrs[NL80211_ATTR_MBSSID_CONFIG], &params->mbssid_config, params->beacon.mbssid_ies ? params->beacon.mbssid_ies->cnt : 0); if (err) goto out_unlock; } if (!params->mbssid_config.ema && params->beacon.rnr_ies) { err = -EINVAL; goto out_unlock; } err = nl80211_calculate_ap_params(params); if (err) goto out_unlock; if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]) params->flags = nla_get_u32( info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]); else if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) params->flags |= NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT; if (wdev->conn_owner_nlportid && info->attrs[NL80211_ATTR_SOCKET_OWNER] && wdev->conn_owner_nlportid != info->snd_portid) { err = -EINVAL; goto out_unlock; } /* FIXME: validate MLO/link-id against driver capabilities */ err = rdev_start_ap(rdev, dev, params); if (!err) { wdev->links[link_id].ap.beacon_interval = params->beacon_interval; wdev->links[link_id].ap.chandef = params->chandef; wdev->u.ap.ssid_len = params->ssid_len; memcpy(wdev->u.ap.ssid, params->ssid, params->ssid_len); if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) wdev->conn_owner_nlportid = info->snd_portid; nl80211_send_ap_started(wdev, link_id); } out_unlock: wdev_unlock(wdev); out: kfree(params->acl); kfree(params->beacon.mbssid_ies); if (params->mbssid_config.tx_wdev && params->mbssid_config.tx_wdev->netdev && params->mbssid_config.tx_wdev->netdev != dev) dev_put(params->mbssid_config.tx_wdev->netdev); kfree(params->beacon.rnr_ies); kfree(params); return err; } static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_beacon_data params; int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!rdev->ops->change_beacon) return -EOPNOTSUPP; if (!wdev->links[link_id].ap.beacon_interval) return -EINVAL; err = nl80211_parse_beacon(rdev, info->attrs, 
&params, info->extack); if (err) goto out; wdev_lock(wdev); err = rdev_change_beacon(rdev, dev, &params); wdev_unlock(wdev); out: kfree(params.mbssid_ies); kfree(params.rnr_ies); return err; } static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; return cfg80211_stop_ap(rdev, dev, link_id, false); } static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = { [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_TDLS_PEER] = { .type = NLA_FLAG }, }; static int parse_station_flags(struct genl_info *info, enum nl80211_iftype iftype, struct station_parameters *params) { struct nlattr *flags[NL80211_STA_FLAG_MAX + 1]; struct nlattr *nla; int flag; /* * Try parsing the new attribute first so userspace * can specify both for older kernels. */ nla = info->attrs[NL80211_ATTR_STA_FLAGS2]; if (nla) { struct nl80211_sta_flag_update *sta_flags; sta_flags = nla_data(nla); params->sta_flags_mask = sta_flags->mask; params->sta_flags_set = sta_flags->set; params->sta_flags_set &= params->sta_flags_mask; if ((params->sta_flags_mask | params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID)) return -EINVAL; return 0; } /* if present, parse the old attribute */ nla = info->attrs[NL80211_ATTR_STA_FLAGS]; if (!nla) return 0; if (nla_parse_nested_deprecated(flags, NL80211_STA_FLAG_MAX, nla, sta_flags_policy, info->extack)) return -EINVAL; /* * Only allow certain flags for interface types so that * other attributes are silently ignored. Remember that * this is backward compatibility code with old userspace * and shouldn't be hit in other cases anyway. */ switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_TDLS_PEER); break; case NL80211_IFTYPE_MESH_POINT: params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED); break; default: return -EINVAL; } for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) { if (flags[flag]) { params->sta_flags_set |= (1<<flag); /* no longer support new API additions in old API */ if (flag > NL80211_STA_FLAG_MAX_OLD_API) return -EINVAL; } } return 0; } bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) { struct nlattr *rate; u32 bitrate; u16 bitrate_compat; enum nl80211_rate_info rate_flg; rate = nla_nest_start_noflag(msg, attr); if (!rate) return false; /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ bitrate = cfg80211_calculate_bitrate(info); /* report 16-bit bitrate only if we can */ bitrate_compat = bitrate < (1UL << 16) ? 
bitrate : 0; if (bitrate > 0 && nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) return false; if (bitrate_compat > 0 && nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) return false; switch (info->bw) { case RATE_INFO_BW_1: rate_flg = NL80211_RATE_INFO_1_MHZ_WIDTH; break; case RATE_INFO_BW_2: rate_flg = NL80211_RATE_INFO_2_MHZ_WIDTH; break; case RATE_INFO_BW_4: rate_flg = NL80211_RATE_INFO_4_MHZ_WIDTH; break; case RATE_INFO_BW_5: rate_flg = NL80211_RATE_INFO_5_MHZ_WIDTH; break; case RATE_INFO_BW_8: rate_flg = NL80211_RATE_INFO_8_MHZ_WIDTH; break; case RATE_INFO_BW_10: rate_flg = NL80211_RATE_INFO_10_MHZ_WIDTH; break; case RATE_INFO_BW_16: rate_flg = NL80211_RATE_INFO_16_MHZ_WIDTH; break; default: WARN_ON(1); fallthrough; case RATE_INFO_BW_20: rate_flg = 0; break; case RATE_INFO_BW_40: rate_flg = NL80211_RATE_INFO_40_MHZ_WIDTH; break; case RATE_INFO_BW_80: rate_flg = NL80211_RATE_INFO_80_MHZ_WIDTH; break; case RATE_INFO_BW_160: rate_flg = NL80211_RATE_INFO_160_MHZ_WIDTH; break; case RATE_INFO_BW_HE_RU: rate_flg = 0; WARN_ON(!(info->flags & RATE_INFO_FLAGS_HE_MCS)); break; case RATE_INFO_BW_320: rate_flg = NL80211_RATE_INFO_320_MHZ_WIDTH; break; case RATE_INFO_BW_EHT_RU: rate_flg = 0; WARN_ON(!(info->flags & RATE_INFO_FLAGS_EHT_MCS)); break; } if (rate_flg && nla_put_flag(msg, rate_flg)) return false; if (info->flags & RATE_INFO_FLAGS_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) return false; if (info->flags & RATE_INFO_FLAGS_SHORT_GI && nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) return false; } else if (info->flags & RATE_INFO_FLAGS_VHT_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_MCS, info->mcs)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_VHT_NSS, info->nss)) return false; if (info->flags & RATE_INFO_FLAGS_SHORT_GI && nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) return false; } else if (info->flags & RATE_INFO_FLAGS_HE_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_HE_MCS, info->mcs)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_HE_NSS, info->nss)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_HE_GI, info->he_gi)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_HE_DCM, info->he_dcm)) return false; if (info->bw == RATE_INFO_BW_HE_RU && nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC, info->he_ru_alloc)) return false; } else if (info->flags & RATE_INFO_FLAGS_S1G_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_MCS, info->mcs)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_NSS, info->nss)) return false; if (info->flags & RATE_INFO_FLAGS_SHORT_GI && nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) return false; } else if (info->flags & RATE_INFO_FLAGS_EHT_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_MCS, info->mcs)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_NSS, info->nss)) return false; if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_GI, info->eht_gi)) return false; if (info->bw == RATE_INFO_BW_EHT_RU && nla_put_u8(msg, NL80211_RATE_INFO_EHT_RU_ALLOC, info->eht_ru_alloc)) return false; } nla_nest_end(msg, rate); return true; } static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal, int id) { void *attr; int i = 0; if (!mask) return true; attr = nla_nest_start_noflag(msg, id); if (!attr) return false; for (i = 0; i < IEEE80211_MAX_CHAINS; i++) { if (!(mask & BIT(i))) continue; if (nla_put_u8(msg, i, signal[i])) return false; } nla_nest_end(msg, attr); return true; } static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, u32 seq, int flags, struct 
cfg80211_registered_device *rdev, struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo) { void *hdr; struct nlattr *sinfoattr, *bss_param; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) { cfg80211_sinfo_release_content(sinfo); return -1; } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation)) goto nla_put_failure; sinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; #define PUT_SINFO(attr, memb, type) do { \ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb)) \ goto nla_put_failure; \ } while (0) #define PUT_SINFO_U64(attr, memb) do { \ if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \ sinfo->memb, NL80211_STA_INFO_PAD)) \ goto nla_put_failure; \ } while (0) PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); PUT_SINFO_U64(ASSOC_AT_BOOTTIME, assoc_at); if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES, (u32)sinfo->rx_bytes)) goto nla_put_failure; if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) | BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) && nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES, (u32)sinfo->tx_bytes)) goto nla_put_failure; PUT_SINFO_U64(RX_BYTES64, rx_bytes); PUT_SINFO_U64(TX_BYTES64, tx_bytes); PUT_SINFO(LLID, llid, u16); PUT_SINFO(PLID, plid, u16); PUT_SINFO(PLINK_STATE, plink_state, u8); PUT_SINFO_U64(RX_DURATION, rx_duration); PUT_SINFO_U64(TX_DURATION, tx_duration); if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) PUT_SINFO(AIRTIME_WEIGHT, airtime_weight, u16); switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: PUT_SINFO(SIGNAL, signal, u8); PUT_SINFO(SIGNAL_AVG, signal_avg, u8); break; default: break; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal, NL80211_STA_INFO_CHAIN_SIGNAL)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) { if (!nl80211_put_signal(msg, sinfo->chains, sinfo->chain_signal_avg, NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) goto nla_put_failure; } if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) { if (!nl80211_put_sta_rate(msg, &sinfo->rxrate, NL80211_STA_INFO_RX_BITRATE)) goto nla_put_failure; } PUT_SINFO(RX_PACKETS, rx_packets, u32); PUT_SINFO(TX_PACKETS, tx_packets, u32); PUT_SINFO(TX_RETRIES, tx_retries, u32); PUT_SINFO(TX_FAILED, tx_failed, u32); PUT_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32); PUT_SINFO(AIRTIME_LINK_METRIC, airtime_link_metric, u32); PUT_SINFO(BEACON_LOSS, beacon_loss_count, u32); PUT_SINFO(LOCAL_PM, local_pm, u32); PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8); PUT_SINFO(CONNECTED_TO_AS, connected_to_as, u8); if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start_noflag(msg, NL80211_STA_INFO_BSS_PARAM); if (!bss_param) goto nla_put_failure; if (((sinfo->bss_param.flags & 
BSS_PARAM_FLAGS_CTS_PROT) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) || ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) || ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) && nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) || nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, sinfo->bss_param.dtim_period) || nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, sinfo->bss_param.beacon_interval)) goto nla_put_failure; nla_nest_end(msg, bss_param); } if ((sinfo->filled & BIT_ULL(NL80211_STA_INFO_STA_FLAGS)) && nla_put(msg, NL80211_STA_INFO_STA_FLAGS, sizeof(struct nl80211_sta_flag_update), &sinfo->sta_flags)) goto nla_put_failure; PUT_SINFO_U64(T_OFFSET, t_offset); PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); PUT_SINFO(RX_MPDUS, rx_mpdu_count, u32); PUT_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32); if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) { PUT_SINFO(ACK_SIGNAL, ack_signal, u8); PUT_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8); } #undef PUT_SINFO #undef PUT_SINFO_U64 if (sinfo->pertid) { struct nlattr *tidsattr; int tid; tidsattr = nla_nest_start_noflag(msg, NL80211_STA_INFO_TID_STATS); if (!tidsattr) goto nla_put_failure; for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) { struct cfg80211_tid_stats *tidstats; struct nlattr *tidattr; tidstats = &sinfo->pertid[tid]; if (!tidstats->filled) continue; tidattr = nla_nest_start_noflag(msg, tid + 1); if (!tidattr) goto nla_put_failure; #define PUT_TIDVAL_U64(attr, memb) do { \ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \ nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \ tidstats->memb, NL80211_TID_STATS_PAD)) \ goto nla_put_failure; \ } while (0) PUT_TIDVAL_U64(RX_MSDU, rx_msdu); PUT_TIDVAL_U64(TX_MSDU, tx_msdu); PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries); PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed); #undef PUT_TIDVAL_U64 if ((tidstats->filled & BIT(NL80211_TID_STATS_TXQ_STATS)) && !nl80211_put_txq_stats(msg, &tidstats->txq_stats, NL80211_TID_STATS_TXQ_STATS)) goto nla_put_failure; nla_nest_end(msg, tidattr); } nla_nest_end(msg, tidsattr); } nla_nest_end(msg, sinfoattr); if (sinfo->assoc_req_ies_len && nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, sinfo->assoc_req_ies)) goto nla_put_failure; if (sinfo->assoc_resp_ies_len && nla_put(msg, NL80211_ATTR_RESP_IE, sinfo->assoc_resp_ies_len, sinfo->assoc_resp_ies)) goto nla_put_failure; if (sinfo->mlo_params_valid) { if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, sinfo->assoc_link_id)) goto nla_put_failure; if (!is_zero_ether_addr(sinfo->mld_addr) && nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN, sinfo->mld_addr)) goto nla_put_failure; } cfg80211_sinfo_release_content(sinfo); genlmsg_end(msg, hdr); return 0; nla_put_failure: cfg80211_sinfo_release_content(sinfo); genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_station(struct sk_buff *skb, struct netlink_callback *cb) { struct station_info sinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 mac_addr[ETH_ALEN]; int sta_idx = cb->args[2]; int err; err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ __acquire(&rdev->wiphy.mtx); if (!wdev->netdev) { err = -EINVAL; goto out_err; } if (!rdev->ops->dump_station) { err = -EOPNOTSUPP; goto out_err; } while (1) 
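/* Walk the driver's station table one index at a time until it reports -ENOENT. */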
{ memset(&sinfo, 0, sizeof(sinfo)); err = rdev_dump_station(rdev, wdev->netdev, sta_idx, mac_addr, &sinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev->netdev, mac_addr, &sinfo) < 0) goto out; sta_idx++; } out: cb->args[2] = sta_idx; err = skb->len; out_err: wiphy_unlock(&rdev->wiphy); return err; } static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_info sinfo; struct sk_buff *msg; u8 *mac_addr = NULL; int err; memset(&sinfo, 0, sizeof(sinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_station) return -EOPNOTSUPP; err = rdev_get_station(rdev, dev, mac_addr, &sinfo); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { cfg80211_sinfo_release_content(&sinfo); return -ENOMEM; } if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, info->snd_portid, info->snd_seq, 0, rdev, dev, mac_addr, &sinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } int cfg80211_check_station_change(struct wiphy *wiphy, struct station_parameters *params, enum cfg80211_station_type statype) { if (params->listen_interval != -1 && statype != CFG80211_STA_AP_CLIENT_UNASSOC) return -EINVAL; if (params->support_p2p_ps != -1 && statype != CFG80211_STA_AP_CLIENT_UNASSOC) return -EINVAL; if (params->aid && !(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) && statype != CFG80211_STA_AP_CLIENT_UNASSOC) return -EINVAL; /* When you run into this, adjust the code below for the new flag */ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); switch (statype) { case CFG80211_STA_MESH_PEER_KERNEL: case CFG80211_STA_MESH_PEER_USER: /* * No ignoring the TDLS flag here -- the userspace mesh * code doesn't have the bug of including TDLS in the * mask everywhere. */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_MFP) | BIT(NL80211_STA_FLAG_AUTHORIZED))) return -EINVAL; break; case CFG80211_STA_TDLS_PEER_SETUP: case CFG80211_STA_TDLS_PEER_ACTIVE: if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) return -EINVAL; /* ignore since it can't change */ params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); break; default: /* disallow mesh-specific things */ if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION) return -EINVAL; if (params->local_pm) return -EINVAL; if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) return -EINVAL; } if (statype != CFG80211_STA_TDLS_PEER_SETUP && statype != CFG80211_STA_TDLS_PEER_ACTIVE) { /* TDLS can't be set, ... */ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) return -EINVAL; /* * ... but don't bother the driver with it. This works around * a hostapd/wpa_supplicant issue -- it always includes the * TLDS_PEER flag in the mask even for AP mode. 
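 * (i.e. NL80211_STA_FLAG_TDLS_PEER ends up in the mask), so the bit is
 * cleared from the mask again just below.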
*/ params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); } if (statype != CFG80211_STA_TDLS_PEER_SETUP && statype != CFG80211_STA_AP_CLIENT_UNASSOC) { /* reject other things that can't change */ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) return -EINVAL; if (params->sta_modify_mask & STATION_PARAM_APPLY_CAPABILITY) return -EINVAL; if (params->link_sta_params.supported_rates) return -EINVAL; if (params->ext_capab || params->link_sta_params.ht_capa || params->link_sta_params.vht_capa || params->link_sta_params.he_capa || params->link_sta_params.eht_capa) return -EINVAL; } if (statype != CFG80211_STA_AP_CLIENT && statype != CFG80211_STA_AP_CLIENT_UNASSOC) { if (params->vlan) return -EINVAL; } switch (statype) { case CFG80211_STA_AP_MLME_CLIENT: /* Use this only for authorizing/unauthorizing a station */ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) return -EOPNOTSUPP; break; case CFG80211_STA_AP_CLIENT: case CFG80211_STA_AP_CLIENT_UNASSOC: /* accept only the listed bits */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED) | BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP))) return -EINVAL; /* but authenticated/associated only if driver handles it */ if (!(wiphy->features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) && params->sta_flags_mask & (BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED))) return -EINVAL; break; case CFG80211_STA_IBSS: case CFG80211_STA_AP_STA: /* reject any changes other than AUTHORIZED */ if (params->sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) return -EINVAL; break; case CFG80211_STA_TDLS_PEER_SETUP: /* reject any changes other than AUTHORIZED or WME */ if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) | BIT(NL80211_STA_FLAG_WME))) return -EINVAL; /* force (at least) rates when authorizing */ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED) && !params->link_sta_params.supported_rates) return -EINVAL; break; case CFG80211_STA_TDLS_PEER_ACTIVE: /* reject any changes */ return -EINVAL; case CFG80211_STA_MESH_PEER_KERNEL: if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) return -EINVAL; break; case CFG80211_STA_MESH_PEER_USER: if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION && params->plink_action != NL80211_PLINK_ACTION_BLOCK) return -EINVAL; break; } /* * Older kernel versions ignored this attribute entirely, so don't * reject attempts to update it but mark it as unused instead so the * driver won't look at the data. */ if (statype != CFG80211_STA_AP_CLIENT_UNASSOC && statype != CFG80211_STA_TDLS_PEER_SETUP) params->link_sta_params.opmode_notif_used = false; return 0; } EXPORT_SYMBOL(cfg80211_check_station_change); /* * Get vlan interface making sure it is running and on the right wiphy. 
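 * Returns NULL when no NL80211_ATTR_STA_VLAN attribute is present, the VLAN
 * netdev (with a reference held) on success, or an ERR_PTR() on failure.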
*/ static struct net_device *get_vlan(struct genl_info *info, struct cfg80211_registered_device *rdev) { struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; struct net_device *v; int ret; if (!vlanattr) return NULL; v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr)); if (!v) return ERR_PTR(-ENODEV); if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) { ret = -EINVAL; goto error; } if (v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && v->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { ret = -EINVAL; goto error; } if (!netif_running(v)) { ret = -ENETDOWN; goto error; } return v; error: dev_put(v); return ERR_PTR(ret); } static int nl80211_parse_sta_wme(struct genl_info *info, struct station_parameters *params) { struct nlattr *tb[NL80211_STA_WME_MAX + 1]; struct nlattr *nla; int err; /* parse WME attributes if present */ if (!info->attrs[NL80211_ATTR_STA_WME]) return 0; nla = info->attrs[NL80211_ATTR_STA_WME]; err = nla_parse_nested_deprecated(tb, NL80211_STA_WME_MAX, nla, nl80211_sta_wme_policy, info->extack); if (err) return err; if (tb[NL80211_STA_WME_UAPSD_QUEUES]) params->uapsd_queues = nla_get_u8( tb[NL80211_STA_WME_UAPSD_QUEUES]); if (params->uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) return -EINVAL; if (tb[NL80211_STA_WME_MAX_SP]) params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]); if (params->max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) return -EINVAL; params->sta_modify_mask |= STATION_PARAM_APPLY_UAPSD; return 0; } static int nl80211_parse_sta_channel_info(struct genl_info *info, struct station_parameters *params) { if (info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]) { params->supported_channels = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); params->supported_channels_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]); /* * Need to include at least one (first channel, number of * channels) tuple for each subband (checked in policy), * and must have proper tuples for the rest of the data as well. 
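 * Each (first channel, number of channels) tuple is two bytes, so the total
 * length must be even; this is checked below.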
*/ if (params->supported_channels_len % 2) return -EINVAL; } if (info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]) { params->supported_oper_classes = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); params->supported_oper_classes_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]); } return 0; } static int nl80211_set_station_tdls(struct genl_info *info, struct station_parameters *params) { int err; /* Dummy STA entry gets updated once the peer capabilities are known */ if (info->attrs[NL80211_ATTR_PEER_AID]) params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params->link_sta_params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params->link_sta_params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { params->link_sta_params.he_capa = nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params->link_sta_params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) { params->link_sta_params.eht_capa = nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); params->link_sta_params.eht_capa_len = nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); if (!ieee80211_eht_capa_size_ok((const u8 *)params->link_sta_params.he_capa, (const u8 *)params->link_sta_params.eht_capa, params->link_sta_params.eht_capa_len, false)) return -EINVAL; } } err = nl80211_parse_sta_channel_info(info, params); if (err) return err; return nl80211_parse_sta_wme(info, params); } static int nl80211_parse_sta_txpower_setting(struct genl_info *info, struct sta_txpwr *txpwr, bool *txpwr_set) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int idx; if (info->attrs[NL80211_ATTR_STA_TX_POWER_SETTING]) { if (!rdev->ops->set_tx_power || !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR)) return -EOPNOTSUPP; idx = NL80211_ATTR_STA_TX_POWER_SETTING; txpwr->type = nla_get_u8(info->attrs[idx]); if (txpwr->type == NL80211_TX_POWER_LIMITED) { idx = NL80211_ATTR_STA_TX_POWER; if (info->attrs[idx]) txpwr->power = nla_get_s16(info->attrs[idx]); else return -EINVAL; } *txpwr_set = true; } else { *txpwr_set = false; } return 0; } static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_parameters params; u8 *mac_addr; int err; memset(&params, 0, sizeof(params)); if (!rdev->ops->change_station) return -EOPNOTSUPP; /* * AID and listen_interval properties can be set only for unassociated * station. Include these parameters here and will check them in * cfg80211_check_station_change(). 
*/ if (info->attrs[NL80211_ATTR_STA_AID]) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (info->attrs[NL80211_ATTR_VLAN_ID]) params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]); if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); else params.listen_interval = -1; if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) params.support_p2p_ps = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); else params.support_p2p_ps = -1; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; params.link_sta_params.link_id = nl80211_link_id_or_invalid(info->attrs); if (info->attrs[NL80211_ATTR_MLD_ADDR]) { /* If MLD_ADDR attribute is set then this is an MLD station * and the MLD_ADDR attribute holds the MLD address and the * MAC attribute holds for the LINK address. * In that case, the link_id is also expected to be valid. */ if (params.link_sta_params.link_id < 0) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); params.link_sta_params.mld_mac = mac_addr; params.link_sta_params.link_mac = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!is_valid_ether_addr(params.link_sta_params.link_mac)) return -EINVAL; } else { mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); } if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { params.link_sta_params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.link_sta_params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); } if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { params.capability = nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]); params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY; } if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) { params.ext_capab = nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); params.ext_capab_len = nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); } if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) return -EINVAL; if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) { params.plink_state = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) params.peer_aid = nla_get_u16( info->attrs[NL80211_ATTR_MESH_PEER_AID]); params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE; } if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) params.local_pm = nla_get_u32( info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]); if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.link_sta_params.opmode_notif_used = true; params.link_sta_params.opmode_notif = nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); } if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) params.link_sta_params.he_6ghz_capa = nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]); if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]) params.airtime_weight = nla_get_u16(info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]); if (params.airtime_weight && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) return -EOPNOTSUPP; err = nl80211_parse_sta_txpower_setting(info, &params.link_sta_params.txpwr, &params.link_sta_params.txpwr_set); if (err) return err; /* Include parameters for TDLS peer (will check later) */ err = nl80211_set_station_tdls(info, &params); if (err) return err; params.vlan = get_vlan(info, rdev); if (IS_ERR(params.vlan)) return PTR_ERR(params.vlan); switch 
(dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: break; default: err = -EOPNOTSUPP; goto out_put_vlan; } /* driver will call cfg80211_check_station_change() */ wdev_lock(dev->ieee80211_ptr); err = rdev_change_station(rdev, dev, mac_addr, &params); wdev_unlock(dev->ieee80211_ptr); out_put_vlan: dev_put(params.vlan); return err; } static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct station_parameters params; u8 *mac_addr = NULL; u32 auth_assoc = BIT(NL80211_STA_FLAG_AUTHENTICATED) | BIT(NL80211_STA_FLAG_ASSOCIATED); memset(&params, 0, sizeof(params)); if (!rdev->ops->add_station) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_AID] && !info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; params.link_sta_params.link_id = nl80211_link_id_or_invalid(info->attrs); if (info->attrs[NL80211_ATTR_MLD_ADDR]) { mac_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); params.link_sta_params.mld_mac = mac_addr; params.link_sta_params.link_mac = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!is_valid_ether_addr(params.link_sta_params.link_mac)) return -EINVAL; } else { mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); } params.link_sta_params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.link_sta_params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); if (info->attrs[NL80211_ATTR_VLAN_ID]) params.vlan_id = nla_get_u16(info->attrs[NL80211_ATTR_VLAN_ID]); if (info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]) { params.support_p2p_ps = nla_get_u8(info->attrs[NL80211_ATTR_STA_SUPPORT_P2P_PS]); } else { /* * if not specified, assume it's supported for P2P GO interface, * and is NOT supported for AP interface */ params.support_p2p_ps = dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO; } if (info->attrs[NL80211_ATTR_PEER_AID]) params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); else params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (info->attrs[NL80211_ATTR_STA_CAPABILITY]) { params.capability = nla_get_u16(info->attrs[NL80211_ATTR_STA_CAPABILITY]); params.sta_modify_mask |= STATION_PARAM_APPLY_CAPABILITY; } if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]) { params.ext_capab = nla_data(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); params.ext_capab_len = nla_len(info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY]); } if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.link_sta_params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params.link_sta_params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { params.link_sta_params.he_capa = nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.link_sta_params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) { params.link_sta_params.eht_capa = 
nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); params.link_sta_params.eht_capa_len = nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); if (!ieee80211_eht_capa_size_ok((const u8 *)params.link_sta_params.he_capa, (const u8 *)params.link_sta_params.eht_capa, params.link_sta_params.eht_capa_len, false)) return -EINVAL; } } if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]) params.link_sta_params.he_6ghz_capa = nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]); if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) { params.link_sta_params.opmode_notif_used = true; params.link_sta_params.opmode_notif = nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]); } if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]) params.airtime_weight = nla_get_u16(info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]); if (params.airtime_weight && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) return -EOPNOTSUPP; err = nl80211_parse_sta_txpower_setting(info, &params.link_sta_params.txpwr, &params.link_sta_params.txpwr_set); if (err) return err; err = nl80211_parse_sta_channel_info(info, &params); if (err) return err; err = nl80211_parse_sta_wme(info, &params); if (err) return err; if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) return -EINVAL; /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT * as userspace might just pass through the capabilities from the IEs * directly, rather than enforcing this restriction and returning an * error in this case. */ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { params.link_sta_params.ht_capa = NULL; params.link_sta_params.vht_capa = NULL; /* HE and EHT require WME */ if (params.link_sta_params.he_capa_len || params.link_sta_params.he_6ghz_capa || params.link_sta_params.eht_capa_len) return -EINVAL; } /* Ensure that HT/VHT capabilities are not set for 6 GHz HE STA */ if (params.link_sta_params.he_6ghz_capa && (params.link_sta_params.ht_capa || params.link_sta_params.vht_capa)) return -EINVAL; /* When you run into this, adjust the code below for the new flag */ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: /* ignore WME attributes if iface/sta is not capable */ if (!(rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) || !(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* TDLS peers cannot be added */ if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; /* but don't bother the driver with it */ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); /* allow authenticated/associated only if driver handles it */ if (!(rdev->wiphy.features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) && params.sta_flags_mask & auth_assoc) return -EINVAL; /* Older userspace, or userspace wanting to be compatible with * !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth * and assoc flags in the mask, but assumes the station will be * added as associated anyway since this was the required driver * behaviour before NL80211_FEATURE_FULL_AP_CLIENT_STATE was * introduced. * In order to not bother drivers with this quirk in the API * set the flags in both the mask and set for new stations in * this case. 
*/ if (!(params.sta_flags_mask & auth_assoc)) { params.sta_flags_mask |= auth_assoc; params.sta_flags_set |= auth_assoc; } /* must be last in here for error handling */ params.vlan = get_vlan(info, rdev); if (IS_ERR(params.vlan)) return PTR_ERR(params.vlan); break; case NL80211_IFTYPE_MESH_POINT: /* ignore uAPSD data */ params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* associated is disallowed */ if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) return -EINVAL; /* TDLS peers cannot be added */ if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) || info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* ignore uAPSD data */ params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD; /* these are disallowed */ if (params.sta_flags_mask & (BIT(NL80211_STA_FLAG_ASSOCIATED) | BIT(NL80211_STA_FLAG_AUTHENTICATED))) return -EINVAL; /* Only TDLS peers can be added */ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) return -EINVAL; /* Can only add if TDLS ... */ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)) return -EOPNOTSUPP; /* ... with external setup is supported */ if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) return -EOPNOTSUPP; /* * Older wpa_supplicant versions always mark the TDLS peer * as authorized, but it shouldn't yet be. */ params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_AUTHORIZED); break; default: return -EOPNOTSUPP; } /* be aware of params.vlan when changing code here */ wdev_lock(dev->ieee80211_ptr); if (wdev->valid_links) { if (params.link_sta_params.link_id < 0) { err = -EINVAL; goto out; } if (!(wdev->valid_links & BIT(params.link_sta_params.link_id))) { err = -ENOLINK; goto out; } } else { if (params.link_sta_params.link_id >= 0) { err = -EINVAL; goto out; } } err = rdev_add_station(rdev, dev, mac_addr, &params); out: wdev_unlock(dev->ieee80211_ptr); dev_put(params.vlan); return err; } static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct station_del_parameters params; int ret; memset(&params, 0, sizeof(params)); if (info->attrs[NL80211_ATTR_MAC]) params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]); switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: /* always accept these */ break; case NL80211_IFTYPE_ADHOC: /* conditionally accept */ if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_DEL_IBSS_STA)) break; return -EINVAL; default: return -EINVAL; } if (!rdev->ops->del_station) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_MGMT_SUBTYPE]) { params.subtype = nla_get_u8(info->attrs[NL80211_ATTR_MGMT_SUBTYPE]); if (params.subtype != IEEE80211_STYPE_DISASSOC >> 4 && params.subtype != IEEE80211_STYPE_DEAUTH >> 4) return -EINVAL; } else { /* Default to Deauthentication frame */ params.subtype = IEEE80211_STYPE_DEAUTH >> 4; } if (info->attrs[NL80211_ATTR_REASON_CODE]) { params.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (params.reason_code == 0) return -EINVAL; /* 0 is reserved */ } else { /* Default to reason code 2 */ params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID; } wdev_lock(dev->ieee80211_ptr); ret = rdev_del_station(rdev, dev, &params); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device 
*dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { void *hdr; struct nlattr *pinfoattr; hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_MPATH); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) || nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) || nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation)) goto nla_put_failure; pinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_MPATH_INFO); if (!pinfoattr) goto nla_put_failure; if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) && nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN, pinfo->frame_qlen)) goto nla_put_failure; if (((pinfo->filled & MPATH_INFO_SN) && nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) || ((pinfo->filled & MPATH_INFO_METRIC) && nla_put_u32(msg, NL80211_MPATH_INFO_METRIC, pinfo->metric)) || ((pinfo->filled & MPATH_INFO_EXPTIME) && nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME, pinfo->exptime)) || ((pinfo->filled & MPATH_INFO_FLAGS) && nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS, pinfo->flags)) || ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) && nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, pinfo->discovery_timeout)) || ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) && nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, pinfo->discovery_retries)) || ((pinfo->filled & MPATH_INFO_HOP_COUNT) && nla_put_u8(msg, NL80211_MPATH_INFO_HOP_COUNT, pinfo->hop_count)) || ((pinfo->filled & MPATH_INFO_PATH_CHANGE) && nla_put_u32(msg, NL80211_MPATH_INFO_PATH_CHANGE, pinfo->path_change_count))) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_mpath(struct sk_buff *skb, struct netlink_callback *cb) { struct mpath_info pinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 dst[ETH_ALEN]; u8 next_hop[ETH_ALEN]; int path_idx = cb->args[2]; int err; err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ __acquire(&rdev->wiphy.mtx); if (!rdev->ops->dump_mpath) { err = -EOPNOTSUPP; goto out_err; } if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst, next_hop, &pinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, dst, next_hop, &pinfo) < 0) goto out; path_idx++; } out: cb->args[2] = path_idx; err = skb->len; out_err: wiphy_unlock(&rdev->wiphy); return err; } static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct mpath_info pinfo; struct sk_buff *msg; u8 *dst = NULL; u8 next_hop[ETH_ALEN]; memset(&pinfo, 0, sizeof(pinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; err = rdev_get_mpath(rdev, dev, dst, next_hop, &pinfo); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0, dev, dst, next_hop, &pinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, 
info); } static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); if (!rdev->ops->change_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; return rdev_change_mpath(rdev, dev, dst, next_hop); } static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); if (!rdev->ops->add_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; return rdev_add_mpath(rdev, dev, dst, next_hop); } static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 *dst = NULL; if (info->attrs[NL80211_ATTR_MAC]) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->del_mpath) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; return rdev_del_mpath(rdev, dev, dst); } static int nl80211_get_mpp(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; struct mpath_info pinfo; struct sk_buff *msg; u8 *dst = NULL; u8 mpp[ETH_ALEN]; memset(&pinfo, 0, sizeof(pinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!rdev->ops->get_mpp) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; err = rdev_get_mpp(rdev, dev, dst, mpp, &pinfo); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_mpath(msg, info->snd_portid, info->snd_seq, 0, dev, dst, mpp, &pinfo) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static int nl80211_dump_mpp(struct sk_buff *skb, struct netlink_callback *cb) { struct mpath_info pinfo; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; u8 dst[ETH_ALEN]; u8 mpp[ETH_ALEN]; int path_idx = cb->args[2]; int err; err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ __acquire(&rdev->wiphy.mtx); if (!rdev->ops->dump_mpp) { err = -EOPNOTSUPP; goto out_err; } if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = rdev_dump_mpp(rdev, wdev->netdev, path_idx, dst, mpp, &pinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, dst, mpp, &pinfo) < 0) goto out; path_idx++; } out: cb->args[2] = path_idx; err = skb->len; out_err: wiphy_unlock(&rdev->wiphy); return err; } static int nl80211_set_bss(struct sk_buff *skb, struct genl_info 
*info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct bss_parameters params; int err; memset(&params, 0, sizeof(params)); params.link_id = nl80211_link_id_or_invalid(info->attrs); /* default to not changing parameters */ params.use_cts_prot = -1; params.use_short_preamble = -1; params.use_short_slot_time = -1; params.ap_isolate = -1; params.ht_opmode = -1; params.p2p_ctwindow = -1; params.p2p_opp_ps = -1; if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) params.use_cts_prot = nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) params.use_short_preamble = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) params.use_short_slot_time = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { params.basic_rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); params.basic_rates_len = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); } if (info->attrs[NL80211_ATTR_AP_ISOLATE]) params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]); if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE]) params.ht_opmode = nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]); if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) { if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; params.p2p_ctwindow = nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]); if (params.p2p_ctwindow != 0 && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)) return -EINVAL; } if (info->attrs[NL80211_ATTR_P2P_OPPPS]) { u8 tmp; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]); params.p2p_opp_ps = tmp; if (params.p2p_opp_ps && !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)) return -EINVAL; } if (!rdev->ops->change_bss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; wdev_lock(wdev); err = rdev_change_bss(rdev, dev, &params); wdev_unlock(wdev); return err; } static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info) { char *data = NULL; bool is_indoor; enum nl80211_user_reg_hint_type user_reg_hint_type; u32 owner_nlportid; /* * You should only get this when cfg80211 hasn't yet initialized * completely when built-in to the kernel right between the time * window between nl80211_init() and regulatory_init(), if that is * even possible. 
*/ if (unlikely(!rcu_access_pointer(cfg80211_regdomain))) return -EINPROGRESS; if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]) user_reg_hint_type = nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]); else user_reg_hint_type = NL80211_USER_REG_HINT_USER; switch (user_reg_hint_type) { case NL80211_USER_REG_HINT_USER: case NL80211_USER_REG_HINT_CELL_BASE: if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); return regulatory_hint_user(data, user_reg_hint_type); case NL80211_USER_REG_HINT_INDOOR: if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) { owner_nlportid = info->snd_portid; is_indoor = !!info->attrs[NL80211_ATTR_REG_INDOOR]; } else { owner_nlportid = 0; is_indoor = true; } return regulatory_hint_indoor(is_indoor, owner_nlportid); default: return -EINVAL; } } static int nl80211_reload_regdb(struct sk_buff *skb, struct genl_info *info) { return reg_reload_regdb(); } static int nl80211_get_mesh_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct mesh_config cur_params; int err = 0; void *hdr; struct nlattr *pinfoattr; struct sk_buff *msg; if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->get_mesh_config) return -EOPNOTSUPP; wdev_lock(wdev); /* If not connected, get default parameters */ if (!wdev->u.mesh.id_len) memcpy(&cur_params, &default_mesh_config, sizeof(cur_params)); else err = rdev_get_mesh_config(rdev, dev, &cur_params); wdev_unlock(wdev); if (err) return err; /* Draw up a netlink message to send back */ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_MESH_CONFIG); if (!hdr) goto out; pinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_MESH_CONFIG); if (!pinfoattr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, cur_params.dot11MeshRetryTimeout) || nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, cur_params.dot11MeshConfirmTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, cur_params.dot11MeshHoldingTimeout) || nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, cur_params.dot11MeshMaxPeerLinks) || nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES, cur_params.dot11MeshMaxRetries) || nla_put_u8(msg, NL80211_MESHCONF_TTL, cur_params.dot11MeshTTL) || nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL, cur_params.element_ttl) || nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, cur_params.auto_open_plinks) || nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, cur_params.dot11MeshNbrOffsetMaxNeighbor) || nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, cur_params.dot11MeshHWMPmaxPREQretries) || nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, cur_params.path_refresh_time) || nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, cur_params.min_discovery_timeout) || nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, cur_params.dot11MeshHWMPactivePathTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, cur_params.dot11MeshHWMPpreqMinInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, cur_params.dot11MeshHWMPperrMinInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, cur_params.dot11MeshHWMPnetDiameterTraversalTime) || nla_put_u8(msg, 
NL80211_MESHCONF_HWMP_ROOTMODE, cur_params.dot11MeshHWMPRootMode) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, cur_params.dot11MeshHWMPRannInterval) || nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, cur_params.dot11MeshGateAnnouncementProtocol) || nla_put_u8(msg, NL80211_MESHCONF_FORWARDING, cur_params.dot11MeshForwarding) || nla_put_s32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, cur_params.rssi_threshold) || nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE, cur_params.ht_opmode) || nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, cur_params.dot11MeshHWMPactivePathToRootTimeout) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, cur_params.dot11MeshHWMProotInterval) || nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, cur_params.dot11MeshHWMPconfirmationInterval) || nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE, cur_params.power_mode) || nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW, cur_params.dot11MeshAwakeWindowDuration) || nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, cur_params.plink_timeout) || nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, cur_params.dot11MeshConnectedToMeshGate) || nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, cur_params.dot11MeshNolearn) || nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_AS, cur_params.dot11MeshConnectedToAuthServer)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: out: nlmsg_free(msg); return -ENOBUFS; } static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_RETRY_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_CONFIRM_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_HOLDING_TIMEOUT] = NLA_POLICY_RANGE(NLA_U16, 1, 255), [NL80211_MESHCONF_MAX_PEER_LINKS] = NLA_POLICY_RANGE(NLA_U16, 0, 255), [NL80211_MESHCONF_MAX_RETRIES] = NLA_POLICY_MAX(NLA_U8, 16), [NL80211_MESHCONF_TTL] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_MESHCONF_ELEMENT_TTL] = NLA_POLICY_MIN(NLA_U8, 1), [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = NLA_POLICY_RANGE(NLA_U32, 1, 255), [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_ROOTMODE] = NLA_POLICY_MAX(NLA_U8, 4), [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_FORWARDING] = NLA_POLICY_MAX(NLA_U8, 1), [NL80211_MESHCONF_RSSI_THRESHOLD] = NLA_POLICY_RANGE(NLA_S32, -255, 0), [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = NLA_POLICY_MIN(NLA_U16, 1), [NL80211_MESHCONF_POWER_MODE] = NLA_POLICY_RANGE(NLA_U32, NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_MAX), [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), 
[NL80211_MESHCONF_NOLEARN] = NLA_POLICY_RANGE(NLA_U8, 0, 1), [NL80211_MESHCONF_CONNECTED_TO_AS] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static const struct nla_policy nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr, IEEE80211_MAX_DATA_LEN), [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, }; static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) { struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; u32 mask = 0; u16 ht_opmode; #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, mask, attr, fn) \ do { \ if (tb[attr]) { \ cfg->param = fn(tb[attr]); \ mask |= BIT((attr) - 1); \ } \ } while (0) if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) return -EINVAL; if (nla_parse_nested_deprecated(tb, NL80211_MESHCONF_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_CONFIG], nl80211_meshconf_params_policy, info->extack)) return -EINVAL; /* This makes sure that there aren't more than 32 mesh config * parameters (otherwise our bitfield scheme would not work.) */ BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); /* Fill in the params struct */ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask, NL80211_MESHCONF_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_PATH_REFRESH_TIME) && (cfg->path_refresh_time < 1 || cfg->path_refresh_time > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT) && (cfg->dot11MeshHWMPactivePathTimeout < 1 || cfg->dot11MeshHWMPactivePathTimeout > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, nla_get_u16); 
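	/*
	 * For illustration only: the FILL_IN_MESH_PARAM_IF_SET() call above
	 * roughly expands to
	 *
	 *	if (tb[NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL]) {
	 *		cfg->dot11MeshHWMPperrMinInterval =
	 *			nla_get_u16(tb[NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL]);
	 *		mask |= BIT(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL - 1);
	 *	}
	 *
	 * i.e. each present attribute fills its config field and records
	 * bit (attr - 1) in the mask returned to the caller.
	 */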
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask, NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol, mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, mask, NL80211_MESHCONF_FORWARDING, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_s32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask, NL80211_MESHCONF_CONNECTED_TO_GATE, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToAuthServer, mask, NL80211_MESHCONF_CONNECTED_TO_AS, nla_get_u8); /* * Check HT operation mode based on * IEEE 802.11-2016 9.4.2.57 HT Operation element. */ if (tb[NL80211_MESHCONF_HT_OPMODE]) { ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) return -EINVAL; /* NON_HT_STA bit is reserved, but some programs set it */ ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; cfg->ht_opmode = ht_opmode; mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1)); } FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, mask, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, nla_get_u32); if (mask & BIT(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT) && (cfg->dot11MeshHWMPactivePathToRootTimeout < 1 || cfg->dot11MeshHWMPactivePathToRootTimeout > 65535)) return -EINVAL; FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval, mask, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, mask, NL80211_MESHCONF_POWER_MODE, nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, mask, NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask, NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNolearn, mask, NL80211_MESHCONF_NOLEARN, nla_get_u8); if (mask_out) *mask_out = mask; return 0; #undef FILL_IN_MESH_PARAM_IF_SET } static int nl80211_parse_mesh_setup(struct genl_info *info, struct mesh_setup *setup) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1]; if (!info->attrs[NL80211_ATTR_MESH_SETUP]) return -EINVAL; if (nla_parse_nested_deprecated(tb, NL80211_MESH_SETUP_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_SETUP], nl80211_mesh_setup_params_policy, info->extack)) return -EINVAL; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) setup->sync_method = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ? IEEE80211_SYNC_METHOD_VENDOR : IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) setup->path_sel_proto = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? IEEE80211_PATH_PROTOCOL_VENDOR : IEEE80211_PATH_PROTOCOL_HWMP; if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC]) setup->path_metric = (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ? 
IEEE80211_PATH_METRIC_VENDOR : IEEE80211_PATH_METRIC_AIRTIME; if (tb[NL80211_MESH_SETUP_IE]) { struct nlattr *ieattr = tb[NL80211_MESH_SETUP_IE]; setup->ie = nla_data(ieattr); setup->ie_len = nla_len(ieattr); } if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] && !(rdev->wiphy.features & NL80211_FEATURE_USERSPACE_MPM)) return -EINVAL; setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]); setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]); setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]); if (setup->is_secure) setup->user_mpm = true; if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { if (!setup->user_mpm) return -EINVAL; setup->auth_id = nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); } return 0; } static int nl80211_update_mesh_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct mesh_config cfg; u32 mask; int err; if (wdev->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->update_mesh_config) return -EOPNOTSUPP; err = nl80211_parse_mesh_config(info, &cfg, &mask); if (err) return err; wdev_lock(wdev); if (!wdev->u.mesh.id_len) err = -ENOLINK; if (!err) err = rdev_update_mesh_config(rdev, dev, mask, &cfg); wdev_unlock(wdev); return err; } static int nl80211_put_regdom(const struct ieee80211_regdomain *regdom, struct sk_buff *msg) { struct nlattr *nl_reg_rules; unsigned int i; if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, regdom->alpha2) || (regdom->dfs_region && nla_put_u8(msg, NL80211_ATTR_DFS_REGION, regdom->dfs_region))) goto nla_put_failure; nl_reg_rules = nla_nest_start_noflag(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) goto nla_put_failure; for (i = 0; i < regdom->n_reg_rules; i++) { struct nlattr *nl_reg_rule; const struct ieee80211_reg_rule *reg_rule; const struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule; unsigned int max_bandwidth_khz; reg_rule = &regdom->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; nl_reg_rule = nla_nest_start_noflag(msg, i); if (!nl_reg_rule) goto nla_put_failure; max_bandwidth_khz = freq_range->max_bandwidth_khz; if (!max_bandwidth_khz) max_bandwidth_khz = reg_get_max_bandwidth(regdom, reg_rule); if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS, reg_rule->flags) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START, freq_range->start_freq_khz) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END, freq_range->end_freq_khz) || nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, max_bandwidth_khz) || nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, power_rule->max_antenna_gain) || nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, power_rule->max_eirp) || nla_put_u32(msg, NL80211_ATTR_DFS_CAC_TIME, reg_rule->dfs_cac_ms)) goto nla_put_failure; nla_nest_end(msg, nl_reg_rule); } nla_nest_end(msg, nl_reg_rules); return 0; nla_put_failure: return -EMSGSIZE; } static int nl80211_get_reg_do(struct sk_buff *skb, struct genl_info *info) { const struct ieee80211_regdomain *regdom = NULL; struct cfg80211_registered_device *rdev; struct wiphy *wiphy = NULL; struct sk_buff *msg; int err = -EMSGSIZE; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOBUFS; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_REG); if (!hdr) goto put_failure; rtnl_lock(); if (info->attrs[NL80211_ATTR_WIPHY]) { bool self_managed; 
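/* Added note: when userspace names a specific wiphy, the reply is
 * built from that wiphy's private regdomain (mandatory for
 * self-managed devices); otherwise the global cfg80211_regdomain is
 * used further below.
 */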
rdev = cfg80211_get_dev_from_info(genl_info_net(info), info); if (IS_ERR(rdev)) { err = PTR_ERR(rdev); goto nla_put_failure; } wiphy = &rdev->wiphy; self_managed = wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED; rcu_read_lock(); regdom = get_wiphy_regdom(wiphy); /* a self-managed-reg device must have a private regdom */ if (WARN_ON(!regdom && self_managed)) { err = -EINVAL; goto nla_put_failure_rcu; } if (regdom && nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure_rcu; } else { rcu_read_lock(); } if (!wiphy && reg_last_request_cell_base() && nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, NL80211_USER_REG_HINT_CELL_BASE)) goto nla_put_failure_rcu; if (!regdom) regdom = rcu_dereference(cfg80211_regdomain); if (nl80211_put_regdom(regdom, msg)) goto nla_put_failure_rcu; rcu_read_unlock(); genlmsg_end(msg, hdr); rtnl_unlock(); return genlmsg_reply(msg, info); nla_put_failure_rcu: rcu_read_unlock(); nla_put_failure: rtnl_unlock(); put_failure: nlmsg_free(msg); return err; } static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb, u32 seq, int flags, struct wiphy *wiphy, const struct ieee80211_regdomain *regdom) { void *hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags, NL80211_CMD_GET_REG); if (!hdr) return -1; genl_dump_check_consistent(cb, hdr); if (nl80211_put_regdom(regdom, msg)) goto nla_put_failure; if (!wiphy && reg_last_request_cell_base() && nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE, NL80211_USER_REG_HINT_CELL_BASE)) goto nla_put_failure; if (wiphy && nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure; if (wiphy && wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_get_reg_dump(struct sk_buff *skb, struct netlink_callback *cb) { const struct ieee80211_regdomain *regdom = NULL; struct cfg80211_registered_device *rdev; int err, reg_idx, start = cb->args[2]; rcu_read_lock(); if (cfg80211_regdomain && start == 0) { err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, NULL, rcu_dereference(cfg80211_regdomain)); if (err < 0) goto out_err; } /* the global regdom is idx 0 */ reg_idx = 1; list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { regdom = get_wiphy_regdom(&rdev->wiphy); if (!regdom) continue; if (++reg_idx <= start) continue; err = nl80211_send_regdom(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, &rdev->wiphy, regdom); if (err < 0) { reg_idx--; break; } } cb->args[2] = reg_idx; err = skb->len; out_err: rcu_read_unlock(); return err; } #ifdef CONFIG_CFG80211_CRDA_SUPPORT static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, [NL80211_ATTR_DFS_CAC_TIME] = { .type = NLA_U32 }, }; static int parse_reg_rule(struct nlattr *tb[], struct ieee80211_reg_rule *reg_rule) { struct ieee80211_freq_range *freq_range = &reg_rule->freq_range; struct ieee80211_power_rule *power_rule = &reg_rule->power_rule; if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_START]) return -EINVAL; 
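/* Added note: FREQ_RANGE_END, FREQ_RANGE_MAX_BW and
 * POWER_RULE_MAX_EIRP are checked as mandatory below, while antenna
 * gain and DFS CAC time remain optional.
 */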
if (!tb[NL80211_ATTR_FREQ_RANGE_END]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) return -EINVAL; if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) return -EINVAL; reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); freq_range->start_freq_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); freq_range->end_freq_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); freq_range->max_bandwidth_khz = nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); power_rule->max_eirp = nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) power_rule->max_antenna_gain = nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); if (tb[NL80211_ATTR_DFS_CAC_TIME]) reg_rule->dfs_cac_ms = nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]); return 0; } static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; struct nlattr *nl_reg_rule; char *alpha2; int rem_reg_rules, r; u32 num_rules = 0, rule_idx = 0; enum nl80211_dfs_regions dfs_region = NL80211_DFS_UNSET; struct ieee80211_regdomain *rd; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REG_RULES]) return -EINVAL; alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); if (info->attrs[NL80211_ATTR_DFS_REGION]) dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]); nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { num_rules++; if (num_rules > NL80211_MAX_SUPP_REG_RULES) return -EINVAL; } rtnl_lock(); if (!reg_is_valid_request(alpha2)) { r = -EINVAL; goto out; } rd = kzalloc(struct_size(rd, reg_rules, num_rules), GFP_KERNEL); if (!rd) { r = -ENOMEM; goto out; } rd->n_reg_rules = num_rules; rd->alpha2[0] = alpha2[0]; rd->alpha2[1] = alpha2[1]; /* * Disable DFS master mode if the DFS region was * not supported or known on this kernel. */ if (reg_supported_dfs_region(dfs_region)) rd->dfs_region = dfs_region; nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { r = nla_parse_nested_deprecated(tb, NL80211_REG_RULE_ATTR_MAX, nl_reg_rule, reg_rule_policy, info->extack); if (r) goto bad_reg; r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); if (r) goto bad_reg; rule_idx++; if (rule_idx > NL80211_MAX_SUPP_REG_RULES) { r = -EINVAL; goto bad_reg; } } r = set_regdom(rd, REGD_SOURCE_CRDA); /* set_regdom takes ownership of rd */ rd = NULL; bad_reg: kfree(rd); out: rtnl_unlock(); return r; } #endif /* CONFIG_CFG80211_CRDA_SUPPORT */ static int validate_scan_freqs(struct nlattr *freqs) { struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; nla_for_each_nested(attr1, freqs, tmp1) if (nla_len(attr1) != sizeof(u32)) return 0; nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* * Some hardware has a limited channel list for * scanning, and it is pretty much nonsensical * to scan for a channel twice, so disallow that * and don't require drivers to check that the * channel list they get isn't longer than what * they can scan, as long as they can scan all * the channels they registered at once. 
*/ nla_for_each_nested(attr2, freqs, tmp2) if (attr1 != attr2 && nla_get_u32(attr1) == nla_get_u32(attr2)) return 0; } return n_channels; } static bool is_band_valid(struct wiphy *wiphy, enum nl80211_band b) { return b < NUM_NL80211_BANDS && wiphy->bands[b]; } static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy, struct cfg80211_bss_selection *bss_select) { struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1]; struct nlattr *nest; int err; bool found = false; int i; /* only process one nested attribute */ nest = nla_data(nla); if (!nla_ok(nest, nla_len(nest))) return -EINVAL; err = nla_parse_nested_deprecated(attr, NL80211_BSS_SELECT_ATTR_MAX, nest, nl80211_bss_select_policy, NULL); if (err) return err; /* only one attribute may be given */ for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) { if (attr[i]) { if (found) return -EINVAL; found = true; } } bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID; if (attr[NL80211_BSS_SELECT_ATTR_RSSI]) bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI; if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) { bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF; bss_select->param.band_pref = nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]); if (!is_band_valid(wiphy, bss_select->param.band_pref)) return -EINVAL; } if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) { struct nl80211_bss_select_rssi_adjust *adj_param; adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]); bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST; bss_select->param.adjust.band = adj_param->band; bss_select->param.adjust.delta = adj_param->delta; if (!is_band_valid(wiphy, bss_select->param.adjust.band)) return -EINVAL; } /* user-space did not provide behaviour attribute */ if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID) return -EINVAL; if (!(wiphy->bss_select_support & BIT(bss_select->behaviour))) return -EINVAL; return 0; } int nl80211_parse_random_mac(struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask) { int i; if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) { eth_zero_addr(mac_addr); eth_zero_addr(mac_addr_mask); mac_addr[0] = 0x2; mac_addr_mask[0] = 0x3; return 0; } /* need both or none */ if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_MAC_MASK]) return -EINVAL; memcpy(mac_addr, nla_data(attrs[NL80211_ATTR_MAC]), ETH_ALEN); memcpy(mac_addr_mask, nla_data(attrs[NL80211_ATTR_MAC_MASK]), ETH_ALEN); /* don't allow or configure an mcast address */ if (!is_multicast_ether_addr(mac_addr_mask) || is_multicast_ether_addr(mac_addr)) return -EINVAL; /* * allow users to pass a MAC address that has bits set outside * of the mask, but don't bother drivers with having to deal * with such bits */ for (i = 0; i < ETH_ALEN; i++) mac_addr[i] &= mac_addr_mask[i]; return 0; } static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev, struct ieee80211_channel *chan) { unsigned int link_id; bool all_ok = true; ASSERT_WDEV_LOCK(wdev); if (!cfg80211_beaconing_iface_active(wdev)) return true; /* * FIXME: check if we have a free HW resource/link for chan * * This, as well as the FIXME below, requires knowing the link * capabilities of the hardware. 
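 * (Added note: lacking that knowledge, the code below conservatively
 * refuses to leave any radar channel unless pre-CAC results allow it.)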
*/ /* we cannot leave radar channels */ for_each_valid_link(wdev, link_id) { struct cfg80211_chan_def *chandef; chandef = wdev_chandef(wdev, link_id); if (!chandef || !chandef->chan) continue; /* * FIXME: don't require all_ok, but rather check only the * correct HW resource/link onto which 'chan' falls, * as only that link leaves the channel for doing * the off-channel operation. */ if (chandef->chan->flags & IEEE80211_CHAN_RADAR) all_ok = false; } if (all_ok) return true; return regulatory_pre_cac_allowed(wdev->wiphy); } static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag, enum nl80211_ext_feature_index feat) { if (!(flags & flag)) return true; if (wiphy_ext_feature_isset(wiphy, feat)) return true; return false; } static int nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, void *request, struct nlattr **attrs, bool is_sched_scan) { u8 *mac_addr, *mac_addr_mask; u32 *flags; enum nl80211_feature_flags randomness_flag; if (!attrs[NL80211_ATTR_SCAN_FLAGS]) return 0; if (is_sched_scan) { struct cfg80211_sched_scan_request *req = request; randomness_flag = wdev ? NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR : NL80211_FEATURE_ND_RANDOM_MAC_ADDR; flags = &req->flags; mac_addr = req->mac_addr; mac_addr_mask = req->mac_addr_mask; } else { struct cfg80211_scan_request *req = request; randomness_flag = NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; flags = &req->flags; mac_addr = req->mac_addr; mac_addr_mask = req->mac_addr_mask; } *flags = nla_get_u32(attrs[NL80211_ATTR_SCAN_FLAGS]); if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_LOW_SPAN, NL80211_EXT_FEATURE_LOW_SPAN_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_LOW_POWER, NL80211_EXT_FEATURE_LOW_POWER_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_HIGH_ACCURACY, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_RANDOM_SN) || !nl80211_check_scan_feat(wiphy, *flags, NL80211_SCAN_FLAG_MIN_PREQ_CONTENT, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT)) return -EOPNOTSUPP; if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { int err; if (!(wiphy->features & randomness_flag) || (wdev && wdev->connected)) return -EOPNOTSUPP; err = nl80211_parse_random_mac(attrs, mac_addr, mac_addr_mask); if (err) return err; } return 0; } static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_scan_request *request; struct nlattr *scan_freqs = NULL; bool scan_freqs_khz = false; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; size_t ie_len, size; wiphy = &rdev->wiphy; if (wdev->iftype == NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!rdev->ops->scan) return -EOPNOTSUPP; if (rdev->scan_req 
|| rdev->scan_msg) return -EBUSY; if (info->attrs[NL80211_ATTR_SCAN_FREQ_KHZ]) { if (!wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ)) return -EOPNOTSUPP; scan_freqs = info->attrs[NL80211_ATTR_SCAN_FREQ_KHZ]; scan_freqs_khz = true; } else if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) scan_freqs = info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]; if (scan_freqs) { n_channels = validate_scan_freqs(scan_freqs); if (!n_channels) return -EINVAL; } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_scan_ssids) return -EINVAL; if (info->attrs[NL80211_ATTR_IE]) ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); else ie_len = 0; if (ie_len > wiphy->max_scan_ie_len) return -EINVAL; size = struct_size(request, channels, n_channels); size = size_add(size, array_size(sizeof(*request->ssids), n_ssids)); size = size_add(size, ie_len); request = kzalloc(size, GFP_KERNEL); if (!request) return -ENOMEM; if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) request->ie = (void *)(request->ssids + n_ssids); else request->ie = (void *)(request->channels + n_channels); } i = 0; if (scan_freqs) { /* user specified, bail out if channel not found */ nla_for_each_nested(attr, scan_freqs, tmp) { struct ieee80211_channel *chan; int freq = nla_get_u32(attr); if (!scan_freqs_khz) freq = MHZ_TO_KHZ(freq); chan = ieee80211_get_channel_khz(wiphy, freq); if (!chan) { err = -EINVAL; goto out_free; } /* ignore disabled channels */ if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } else { enum nl80211_band band; /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { struct ieee80211_channel *chan; chan = &wiphy->bands[band]->channels[j]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } } if (!i) { err = -EINVAL; goto out_free; } request->n_channels = i; wdev_lock(wdev); for (i = 0; i < request->n_channels; i++) { struct ieee80211_channel *chan = request->channels[i]; /* if we can go off-channel to the target channel we're good */ if (cfg80211_off_channel_oper_allowed(wdev, chan)) continue; if (!cfg80211_wdev_on_sub_chan(wdev, chan, true)) { wdev_unlock(wdev); err = -EBUSY; goto out_free; } } wdev_unlock(wdev); i = 0; if (n_ssids) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } request->ssids[i].ssid_len = nla_len(attr); memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); i++; } } if (info->attrs[NL80211_ATTR_IE]) { request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); memcpy((void *)request->ie, nla_data(info->attrs[NL80211_ATTR_IE]), request->ie_len); } for (i = 0; i < NUM_NL80211_BANDS; i++) if (wiphy->bands[i]) request->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1; if (info->attrs[NL80211_ATTR_SCAN_SUPP_RATES]) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SUPP_RATES], tmp) { enum nl80211_band band = nla_type(attr); if (band < 0 || band >= NUM_NL80211_BANDS) { err = -EINVAL; goto out_free; } if (!wiphy->bands[band]) continue; err = ieee80211_get_ratemask(wiphy->bands[band], nla_data(attr), nla_len(attr), &request->rates[band]); if (err) goto out_free; } } if 
(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) { request->duration = nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]); request->duration_mandatory = nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]); } err = nl80211_check_scan_flags(wiphy, wdev, request, info->attrs, false); if (err) goto out_free; request->no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); /* Initial implementation used NL80211_ATTR_MAC to set the specific * BSSID to scan for. This was problematic because that same attribute * was already used for another purpose (local random MAC address). The * NL80211_ATTR_BSSID attribute was added to fix this. For backwards * compatibility with older userspace components, also use the * NL80211_ATTR_MAC value here if it can be determined to be used for * the specific BSSID use case instead of the random MAC address * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use). */ if (info->attrs[NL80211_ATTR_BSSID]) memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN); else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) && info->attrs[NL80211_ATTR_MAC]) memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]), ETH_ALEN); else eth_broadcast_addr(request->bssid); request->wdev = wdev; request->wiphy = &rdev->wiphy; request->scan_start = jiffies; rdev->scan_req = request; err = cfg80211_scan(rdev); if (err) goto out_free; nl80211_send_scan_start(rdev, wdev); dev_hold(wdev->netdev); return 0; out_free: rdev->scan_req = NULL; kfree(request); return err; } static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->abort_scan) return -EOPNOTSUPP; if (rdev->scan_msg) return 0; if (!rdev->scan_req) return -ENOENT; rdev_abort_scan(rdev, wdev); return 0; } static int nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans, struct cfg80211_sched_scan_request *request, struct nlattr **attrs) { int tmp, err, i = 0; struct nlattr *attr; if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { u32 interval; /* * If scan plans are not specified, * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this * case one scan plan will be set with the specified scan * interval and infinite number of iterations. 
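 * (Added example: an interval of 30000 ms becomes a single 30 second
 * plan via DIV_ROUND_UP(); the result is then clamped to
 * wiphy->max_sched_scan_plan_interval below.)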
*/ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]); if (!interval) return -EINVAL; request->scan_plans[0].interval = DIV_ROUND_UP(interval, MSEC_PER_SEC); if (!request->scan_plans[0].interval) return -EINVAL; if (request->scan_plans[0].interval > wiphy->max_sched_scan_plan_interval) request->scan_plans[0].interval = wiphy->max_sched_scan_plan_interval; return 0; } nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) { struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1]; if (WARN_ON(i >= n_plans)) return -EINVAL; err = nla_parse_nested_deprecated(plan, NL80211_SCHED_SCAN_PLAN_MAX, attr, nl80211_plan_policy, NULL); if (err) return err; if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]) return -EINVAL; request->scan_plans[i].interval = nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]); if (!request->scan_plans[i].interval || request->scan_plans[i].interval > wiphy->max_sched_scan_plan_interval) return -EINVAL; if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) { request->scan_plans[i].iterations = nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]); if (!request->scan_plans[i].iterations || (request->scan_plans[i].iterations > wiphy->max_sched_scan_plan_iterations)) return -EINVAL; } else if (i < n_plans - 1) { /* * All scan plans but the last one must specify * a finite number of iterations */ return -EINVAL; } i++; } /* * The last scan plan must not specify the number of * iterations, it is supposed to run infinitely */ if (request->scan_plans[n_plans - 1].iterations) return -EINVAL; return 0; } static int nl80211_parse_sched_scan_per_band_rssi(struct wiphy *wiphy, struct cfg80211_match_set *match_sets, struct nlattr *tb_band_rssi, s32 rssi_thold) { struct nlattr *attr; int i, tmp, ret = 0; if (!wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD)) { if (tb_band_rssi) ret = -EOPNOTSUPP; else for (i = 0; i < NUM_NL80211_BANDS; i++) match_sets->per_band_rssi_thold[i] = NL80211_SCAN_RSSI_THOLD_OFF; return ret; } for (i = 0; i < NUM_NL80211_BANDS; i++) match_sets->per_band_rssi_thold[i] = rssi_thold; nla_for_each_nested(attr, tb_band_rssi, tmp) { enum nl80211_band band = nla_type(attr); if (band < 0 || band >= NUM_NL80211_BANDS) return -EINVAL; match_sets->per_band_rssi_thold[band] = nla_get_s32(attr); } return 0; } static struct cfg80211_sched_scan_request * nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, struct nlattr **attrs, int max_match_sets) { struct cfg80211_sched_scan_request *request; struct nlattr *attr; int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0; enum nl80211_band band; size_t ie_len, size; struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF; if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( attrs[NL80211_ATTR_SCAN_FREQUENCIES]); if (!n_channels) return ERR_PTR(-EINVAL); } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } if (attrs[NL80211_ATTR_SCAN_SSIDS]) nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_sched_scan_ssids) return ERR_PTR(-EINVAL); /* * First, count the number of 'real' matchsets. Due to an issue with * the old implementation, matchsets containing only the RSSI attribute * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default' * RSSI for all matchsets, rather than their own matchset for reporting * all APs with a strong RSSI. 
This is needed to be compatible with * older userspace that treated a matchset with only the RSSI as the * global RSSI for all other matchsets - if there are other matchsets. */ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *rssi; err = nla_parse_nested_deprecated(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, attr, nl80211_match_policy, NULL); if (err) return ERR_PTR(err); /* SSID and BSSID are mutually exclusive */ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] && tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) return ERR_PTR(-EINVAL); /* add other standalone attributes here */ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] || tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]) { n_match_sets++; continue; } rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; if (rssi) default_match_rssi = nla_get_s32(rssi); } } /* However, if there's no other matchset, add the RSSI one */ if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF) n_match_sets = 1; if (n_match_sets > max_match_sets) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_IE]) ie_len = nla_len(attrs[NL80211_ATTR_IE]); else ie_len = 0; if (ie_len > wiphy->max_sched_scan_ie_len) return ERR_PTR(-EINVAL); if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) { /* * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since * each scan plan already specifies its own interval */ if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) return ERR_PTR(-EINVAL); nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) n_plans++; } else { /* * The scan interval attribute is kept for backward * compatibility. If no scan plans are specified and sched scan * interval is specified, one scan plan will be set with this * scan interval and infinite number of iterations. 
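 * (Added note: in this legacy case n_plans is forced to 1 and the
 * single plan is filled in later by nl80211_parse_sched_scan_plans().)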
*/ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]) return ERR_PTR(-EINVAL); n_plans = 1; } if (!n_plans || n_plans > wiphy->max_sched_scan_plans) return ERR_PTR(-EINVAL); if (!wiphy_ext_feature_isset( wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) && (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] || attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST])) return ERR_PTR(-EINVAL); size = struct_size(request, channels, n_channels); size = size_add(size, array_size(sizeof(*request->ssids), n_ssids)); size = size_add(size, array_size(sizeof(*request->match_sets), n_match_sets)); size = size_add(size, array_size(sizeof(*request->scan_plans), n_plans)); size = size_add(size, ie_len); request = kzalloc(size, GFP_KERNEL); if (!request) return ERR_PTR(-ENOMEM); if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = n_ssids; if (ie_len) { if (n_ssids) request->ie = (void *)(request->ssids + n_ssids); else request->ie = (void *)(request->channels + n_channels); } if (n_match_sets) { if (request->ie) request->match_sets = (void *)(request->ie + ie_len); else if (n_ssids) request->match_sets = (void *)(request->ssids + n_ssids); else request->match_sets = (void *)(request->channels + n_channels); } request->n_match_sets = n_match_sets; if (n_match_sets) request->scan_plans = (void *)(request->match_sets + n_match_sets); else if (request->ie) request->scan_plans = (void *)(request->ie + ie_len); else if (n_ssids) request->scan_plans = (void *)(request->ssids + n_ssids); else request->scan_plans = (void *)(request->channels + n_channels); request->n_scan_plans = n_plans; i = 0; if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { struct ieee80211_channel *chan; chan = ieee80211_get_channel(wiphy, nla_get_u32(attr)); if (!chan) { err = -EINVAL; goto out_free; } /* ignore disabled channels */ if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } else { /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { struct ieee80211_channel *chan; chan = &wiphy->bands[band]->channels[j]; if (chan->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i] = chan; i++; } } } if (!i) { err = -EINVAL; goto out_free; } request->n_channels = i; i = 0; if (n_ssids) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } request->ssids[i].ssid_len = nla_len(attr); memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); i++; } } i = 0; if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) { nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_MATCH], tmp) { struct nlattr *ssid, *bssid, *rssi; err = nla_parse_nested_deprecated(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, attr, nl80211_match_policy, NULL); if (err) goto out_free; ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]; bssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID]; if (!ssid && !bssid) { i++; continue; } if (WARN_ON(i >= n_match_sets)) { /* this indicates a programming error, * the loop above should have verified * things properly */ err = -EINVAL; goto out_free; } if (ssid) { memcpy(request->match_sets[i].ssid.ssid, nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); } if (bssid) memcpy(request->match_sets[i].bssid, nla_data(bssid), ETH_ALEN); /* special attribute - 
old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; if (rssi) request->match_sets[i].rssi_thold = nla_get_s32(rssi); /* Parse per band RSSI attribute */ err = nl80211_parse_sched_scan_per_band_rssi(wiphy, &request->match_sets[i], tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI], request->match_sets[i].rssi_thold); if (err) goto out_free; i++; } /* there was no other matchset, so the RSSI one is alone */ if (i == 0 && n_match_sets) request->match_sets[0].rssi_thold = default_match_rssi; request->min_rssi_thold = INT_MAX; for (i = 0; i < n_match_sets; i++) request->min_rssi_thold = min(request->match_sets[i].rssi_thold, request->min_rssi_thold); } else { request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF; } if (ie_len) { request->ie_len = ie_len; memcpy((void *)request->ie, nla_data(attrs[NL80211_ATTR_IE]), request->ie_len); } err = nl80211_check_scan_flags(wiphy, wdev, request, attrs, true); if (err) goto out_free; if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY]) request->delay = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]); if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) { request->relative_rssi = nla_get_s8( attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]); request->relative_rssi_set = true; } if (request->relative_rssi_set && attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) { struct nl80211_bss_select_rssi_adjust *rssi_adjust; rssi_adjust = nla_data( attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]); request->rssi_adjust.band = rssi_adjust->band; request->rssi_adjust.delta = rssi_adjust->delta; if (!is_band_valid(wiphy, request->rssi_adjust.band)) { err = -EINVAL; goto out_free; } } err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs); if (err) goto out_free; request->scan_start = jiffies; return request; out_free: kfree(request); return ERR_PTR(err); } static int nl80211_start_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_sched_scan_request *sched_scan_req; bool want_multi; int err; if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_start) return -EOPNOTSUPP; want_multi = info->attrs[NL80211_ATTR_SCHED_SCAN_MULTI]; err = cfg80211_sched_scan_req_possible(rdev, want_multi); if (err) return err; sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev, info->attrs, rdev->wiphy.max_match_sets); err = PTR_ERR_OR_ZERO(sched_scan_req); if (err) goto out_err; /* leave request id zero for legacy request * or if driver does not support multi-scheduled scan */ if (want_multi && rdev->wiphy.max_sched_scan_reqs > 1) sched_scan_req->reqid = cfg80211_assign_cookie(rdev); err = rdev_sched_scan_start(rdev, dev, sched_scan_req); if (err) goto out_free; sched_scan_req->dev = dev; sched_scan_req->wiphy = &rdev->wiphy; if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) sched_scan_req->owner_nlportid = info->snd_portid; cfg80211_add_sched_scan_req(rdev, sched_scan_req); nl80211_send_sched_scan(sched_scan_req, NL80211_CMD_START_SCHED_SCAN); return 0; out_free: kfree(sched_scan_req); out_err: return err; } static int nl80211_stop_sched_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_sched_scan_request *req; struct cfg80211_registered_device *rdev = info->user_ptr[0]; u64 cookie; if (!rdev->wiphy.max_sched_scan_reqs || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_COOKIE]) { cookie = 
nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return __cfg80211_stop_sched_scan(rdev, cookie, false); } req = list_first_or_null_rcu(&rdev->sched_scan_req_list, struct cfg80211_sched_scan_request, list); if (!req || req->reqid || (req->owner_nlportid && req->owner_nlportid != info->snd_portid)) return -ENOENT; return cfg80211_stop_sched_scan_req(rdev, req, false); } static int nl80211_start_radar_detection(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_chan_def chandef; enum nl80211_dfs_regions dfs_region; unsigned int cac_time_ms; int err = -EINVAL; flush_delayed_work(&rdev->dfs_update_channels_wk); wiphy_lock(wiphy); dfs_region = reg_get_dfs_region(wiphy); if (dfs_region == NL80211_DFS_UNSET) goto unlock; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) goto unlock; err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); if (err < 0) goto unlock; if (err == 0) { err = -EINVAL; goto unlock; } if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) { err = -EINVAL; goto unlock; } if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) { err = cfg80211_start_background_radar_detection(rdev, wdev, &chandef); goto unlock; } if (netif_carrier_ok(dev)) { err = -EBUSY; goto unlock; } if (wdev->cac_started) { err = -EBUSY; goto unlock; } /* CAC start is offloaded to HW and can't be started manually */ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) { err = -EOPNOTSUPP; goto unlock; } if (!rdev->ops->start_radar_detection) { err = -EOPNOTSUPP; goto unlock; } cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef); if (WARN_ON(!cac_time_ms)) cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms); if (!err) { wdev->links[0].ap.chandef = chandef; wdev->cac_started = true; wdev->cac_start_time = jiffies; wdev->cac_time_ms = cac_time_ms; } unlock: wiphy_unlock(wiphy); return err; } static int nl80211_notify_radar_detection(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_chan_def chandef; enum nl80211_dfs_regions dfs_region; int err; dfs_region = reg_get_dfs_region(wiphy); if (dfs_region == NL80211_DFS_UNSET) { GENL_SET_ERR_MSG(info, "DFS Region is not set. Unexpected Radar indication"); return -EINVAL; } err = nl80211_parse_chandef(rdev, info, &chandef); if (err) { GENL_SET_ERR_MSG(info, "Unable to extract chandef info"); return err; } err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); if (err < 0) { GENL_SET_ERR_MSG(info, "chandef is invalid"); return err; } if (err == 0) { GENL_SET_ERR_MSG(info, "Unexpected Radar indication for chandef/iftype"); return -EINVAL; } /* Do not process this notification if radar is already detected * by kernel on this channel, and return success. 
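 * (Added note: NL80211_DFS_UNAVAILABLE means an earlier detection
 * already marked the channel, so the state update and cross-wiphy
 * propagation below would be redundant.)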
*/ if (chandef.chan->dfs_state == NL80211_DFS_UNAVAILABLE) return 0; cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_UNAVAILABLE); cfg80211_sched_dfs_chan_update(rdev); rdev->radar_chandef = chandef; /* Propagate this notification to other radios as well */ queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk); return 0; } static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_csa_settings params; struct nlattr **csa_attrs = NULL; int err; bool need_new_beacon = false; bool need_handle_dfs_flag = true; int len, i; u32 cs_count; if (!rdev->ops->channel_switch || !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) return -EOPNOTSUPP; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: need_new_beacon = true; /* For all modes except AP the handle_dfs flag needs to be * supplied to tell the kernel that userspace will handle radar * events when they happen. Otherwise a switch to a channel * requiring DFS will be rejected. */ need_handle_dfs_flag = false; /* useless if AP is not running */ if (!wdev->links[link_id].ap.beacon_interval) return -ENOTCONN; break; case NL80211_IFTYPE_ADHOC: if (!wdev->u.ibss.ssid_len) return -ENOTCONN; break; case NL80211_IFTYPE_MESH_POINT: if (!wdev->u.mesh.id_len) return -ENOTCONN; break; default: return -EOPNOTSUPP; } memset(&params, 0, sizeof(params)); params.beacon_csa.ftm_responder = -1; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]) return -EINVAL; /* only important for AP, IBSS and mesh create IEs internally */ if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) return -EINVAL; /* Even though the attribute is u32, the specification says * u8, so let's make sure we don't overflow. 
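 * (Added note: the count is carried in a single octet of the CSA
 * element, hence anything above 255 is rejected right below.)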
*/ cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); if (cs_count > 255) return -EINVAL; params.count = cs_count; if (!need_new_beacon) goto skip_beacons; err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_after, info->extack); if (err) goto free; csa_attrs = kcalloc(NL80211_ATTR_MAX + 1, sizeof(*csa_attrs), GFP_KERNEL); if (!csa_attrs) { err = -ENOMEM; goto free; } err = nla_parse_nested_deprecated(csa_attrs, NL80211_ATTR_MAX, info->attrs[NL80211_ATTR_CSA_IES], nl80211_policy, info->extack); if (err) goto free; err = nl80211_parse_beacon(rdev, csa_attrs, &params.beacon_csa, info->extack); if (err) goto free; if (!csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]) { err = -EINVAL; goto free; } len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]); if (!len || (len % sizeof(u16))) { err = -EINVAL; goto free; } params.n_counter_offsets_beacon = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && (params.n_counter_offsets_beacon > rdev->wiphy.max_num_csa_counters)) { err = -EINVAL; goto free; } params.counter_offsets_beacon = nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]); /* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_beacon; i++) { u16 offset = params.counter_offsets_beacon[i]; if (offset >= params.beacon_csa.tail_len) { err = -EINVAL; goto free; } if (params.beacon_csa.tail[offset] != params.count) { err = -EINVAL; goto free; } } if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) { len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]); if (!len || (len % sizeof(u16))) { err = -EINVAL; goto free; } params.n_counter_offsets_presp = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && (params.n_counter_offsets_presp > rdev->wiphy.max_num_csa_counters)) { err = -EINVAL; goto free; } params.counter_offsets_presp = nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]); /* sanity checks - counters should fit and be the same */ for (i = 0; i < params.n_counter_offsets_presp; i++) { u16 offset = params.counter_offsets_presp[i]; if (offset >= params.beacon_csa.probe_resp_len) { err = -EINVAL; goto free; } if (params.beacon_csa.probe_resp[offset] != params.count) { err = -EINVAL; goto free; } } } skip_beacons: err = nl80211_parse_chandef(rdev, info, &params.chandef); if (err) goto free; if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, wdev->iftype)) { err = -EINVAL; goto free; } err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef, wdev->iftype); if (err < 0) goto free; if (err > 0) { params.radar_required = true; if (need_handle_dfs_flag && !nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS])) { err = -EINVAL; goto free; } } if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX]) params.block_tx = true; if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) { err = nl80211_parse_punct_bitmap(rdev, info, &params.chandef, &params.punct_bitmap); if (err) goto free; } wdev_lock(wdev); err = rdev_channel_switch(rdev, dev, &params); wdev_unlock(wdev); free: kfree(params.beacon_after.mbssid_ies); kfree(params.beacon_csa.mbssid_ies); kfree(params.beacon_after.rnr_ies); kfree(params.beacon_csa.rnr_ies); kfree(csa_attrs); return err; } static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_internal_bss *intbss) { struct cfg80211_bss *res = &intbss->pub; const struct cfg80211_bss_ies *ies; unsigned int link_id; void *hdr; struct nlattr *bss; ASSERT_WDEV_LOCK(wdev); hdr = 
nl80211hdr_put(msg, NETLINK_CB(cb->skb).portid, seq, flags, NL80211_CMD_NEW_SCAN_RESULTS); if (!hdr) return -1; genl_dump_check_consistent(cb, hdr); if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation)) goto nla_put_failure; if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; bss = nla_nest_start_noflag(msg, NL80211_ATTR_BSS); if (!bss) goto nla_put_failure; if ((!is_zero_ether_addr(res->bssid) && nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid))) goto nla_put_failure; rcu_read_lock(); /* indicate whether we have probe response data or not */ if (rcu_access_pointer(res->proberesp_ies) && nla_put_flag(msg, NL80211_BSS_PRESP_DATA)) goto fail_unlock_rcu; /* this pointer prefers to be pointed to probe response data * but is always valid */ ies = rcu_dereference(res->ies); if (ies) { if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf, NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, ies->len, ies->data)) goto fail_unlock_rcu; } /* and this pointer is always (unless driver didn't know) beacon data */ ies = rcu_dereference(res->beacon_ies); if (ies && ies->from_beacon) { if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf, NL80211_BSS_PAD)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, ies->len, ies->data)) goto fail_unlock_rcu; } rcu_read_unlock(); if (res->beacon_interval && nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval)) goto nla_put_failure; if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) || nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) || nla_put_u32(msg, NL80211_BSS_FREQUENCY_OFFSET, res->channel->freq_offset) || nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) || nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO, jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure; if (intbss->parent_tsf && (nla_put_u64_64bit(msg, NL80211_BSS_PARENT_TSF, intbss->parent_tsf, NL80211_BSS_PAD) || nla_put(msg, NL80211_BSS_PARENT_BSSID, ETH_ALEN, intbss->parent_bssid))) goto nla_put_failure; if (intbss->ts_boottime && nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, intbss->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; if (!nl80211_put_signal(msg, intbss->pub.chains, intbss->pub.chain_signal, NL80211_BSS_CHAIN_SIGNAL)) goto nla_put_failure; switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) goto nla_put_failure; break; case CFG80211_SIGNAL_TYPE_UNSPEC: if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal)) goto nla_put_failure; break; default: break; } switch (wdev->iftype) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: for_each_valid_link(wdev, link_id) { if (intbss == wdev->links[link_id].client.current_bss && (nla_put_u32(msg, NL80211_BSS_STATUS, NL80211_BSS_STATUS_ASSOCIATED) || (wdev->valid_links && (nla_put_u8(msg, NL80211_BSS_MLO_LINK_ID, link_id) || nla_put(msg, NL80211_BSS_MLD_ADDR, ETH_ALEN, wdev->u.client.connected_addr))))) goto nla_put_failure; } break; case NL80211_IFTYPE_ADHOC: if (intbss == wdev->u.ibss.current_bss && nla_put_u32(msg, NL80211_BSS_STATUS, NL80211_BSS_STATUS_IBSS_JOINED)) goto nla_put_failure; break; default: break; } nla_nest_end(msg, bss); genlmsg_end(msg, hdr); return 0; fail_unlock_rcu: rcu_read_unlock(); nla_put_failure: genlmsg_cancel(msg, 
hdr); return -EMSGSIZE; } static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; struct cfg80211_internal_bss *scan; struct wireless_dev *wdev; int start = cb->args[2], idx = 0; int err; err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL); if (err) return err; /* nl80211_prepare_wdev_dump acquired it in the successful case */ __acquire(&rdev->wiphy.mtx); wdev_lock(wdev); spin_lock_bh(&rdev->bss_lock); /* * dump_scan will be called multiple times to break up the scan results * into multiple messages. It is unlikely that any more bss-es will be * expired after the first call, so only call this on the * first dump_scan invocation. */ if (start == 0) cfg80211_bss_expire(rdev); cb->seq = rdev->bss_generation; list_for_each_entry(scan, &rdev->bss_list, list) { if (++idx <= start) continue; if (nl80211_send_bss(skb, cb, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev, scan) < 0) { idx--; break; } } spin_unlock_bh(&rdev->bss_lock); wdev_unlock(wdev); cb->args[2] = idx; wiphy_unlock(&rdev->wiphy); return skb->len; } static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device *dev, bool allow_radio_stats, struct survey_info *survey) { void *hdr; struct nlattr *infoattr; /* skip radio stats if userspace didn't request them */ if (!survey->channel && !allow_radio_stats) return 0; hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_SURVEY_RESULTS); if (!hdr) return -ENOMEM; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; infoattr = nla_nest_start_noflag(msg, NL80211_ATTR_SURVEY_INFO); if (!infoattr) goto nla_put_failure; if (survey->channel && nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY, survey->channel->center_freq)) goto nla_put_failure; if (survey->channel && survey->channel->freq_offset && nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY_OFFSET, survey->channel->freq_offset)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_NOISE_DBM) && nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_IN_USE) && nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME, survey->time, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_BUSY) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY, survey->time_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY, survey->time_ext_busy, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_RX) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX, survey->time_rx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_TX) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX, survey->time_tx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_SCAN) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN, survey->time_scan, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; if ((survey->filled & SURVEY_INFO_TIME_BSS_RX) && nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BSS_RX, survey->time_bss_rx, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; nla_nest_end(msg, infoattr); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int
nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr **attrbuf; struct survey_info survey; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; int survey_idx = cb->args[2]; int res; bool radio_stats; attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); if (!attrbuf) return -ENOMEM; res = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, attrbuf); if (res) { kfree(attrbuf); return res; } /* nl80211_prepare_wdev_dump acquired it in the successful case */ __acquire(&rdev->wiphy.mtx); /* prepare_wdev_dump parsed the attributes */ radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; if (!wdev->netdev) { res = -EINVAL; goto out_err; } if (!rdev->ops->dump_survey) { res = -EOPNOTSUPP; goto out_err; } while (1) { wdev_lock(wdev); res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey); wdev_unlock(wdev); if (res == -ENOENT) break; if (res) goto out_err; /* don't send disabled channels, but do send non-channel data */ if (survey.channel && survey.channel->flags & IEEE80211_CHAN_DISABLED) { survey_idx++; continue; } if (nl80211_send_survey(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, wdev->netdev, radio_stats, &survey) < 0) goto out; survey_idx++; } out: cb->args[2] = survey_idx; res = skb->len; out_err: kfree(attrbuf); wiphy_unlock(&rdev->wiphy); return res; } static bool nl80211_valid_wpa_versions(u32 wpa_versions) { return !(wpa_versions & ~(NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2 | NL80211_WPA_VERSION_3)); } static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ieee80211_channel *chan; const u8 *bssid, *ssid; int err, ssid_len; enum nl80211_auth_type auth_type; struct key_parse key; bool local_state_change; struct cfg80211_auth_request req = {}; u32 freq; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_AUTH_TYPE]) return -EINVAL; if (!info->attrs[NL80211_ATTR_SSID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx >= 0) { if (key.type != -1 && key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; if (!key.p.key || !key.p.key_len) return -EINVAL; if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 || key.p.key_len != WLAN_KEY_LEN_WEP40) && (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 || key.p.key_len != WLAN_KEY_LEN_WEP104)) return -EINVAL; if (key.idx > 3) return -EINVAL; } else { key.p.key_len = 0; key.p.key = NULL; } if (key.idx >= 0) { int i; bool ok = false; for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) { if (key.p.cipher == rdev->wiphy.cipher_suites[i]) { ok = true; break; } } if (!ok) return -EINVAL; } if (!rdev->ops->auth) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); freq = MHZ_TO_KHZ(nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); if (info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]) freq += nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]); chan = nl80211_get_valid_chan(&rdev->wiphy, freq); if (!chan) return -EINVAL; ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } auth_type = 
nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE)) return -EINVAL; if ((auth_type == NL80211_AUTHTYPE_SAE || auth_type == NL80211_AUTHTYPE_FILS_SK || auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK) && !info->attrs[NL80211_ATTR_AUTH_DATA]) return -EINVAL; if (info->attrs[NL80211_ATTR_AUTH_DATA]) { if (auth_type != NL80211_AUTHTYPE_SAE && auth_type != NL80211_AUTHTYPE_FILS_SK && auth_type != NL80211_AUTHTYPE_FILS_SK_PFS && auth_type != NL80211_AUTHTYPE_FILS_PK) return -EINVAL; req.auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]); req.auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]); } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; /* * Since we no longer track auth state, ignore * requests to only change local state. */ if (local_state_change) return 0; req.auth_type = auth_type; req.key = key.p.key; req.key_len = key.p.key_len; req.key_idx = key.idx; req.link_id = nl80211_link_id_or_invalid(info->attrs); if (req.link_id >= 0) { if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (!info->attrs[NL80211_ATTR_MLD_ADDR]) return -EINVAL; req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); if (!is_valid_ether_addr(req.ap_mld_addr)) return -EINVAL; } req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!req.bss) return -ENOENT; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_auth(rdev, dev, &req); wdev_unlock(dev->ieee80211_ptr); cfg80211_put_bss(&rdev->wiphy, req.bss); return err; } static int validate_pae_over_nl80211(struct cfg80211_registered_device *rdev, struct genl_info *info) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { GENL_SET_ERR_MSG(info, "SOCKET_OWNER not set"); return -EINVAL; } if (!rdev->ops->tx_control_port || !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211)) return -EOPNOTSUPP; return 0; } static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, struct genl_info *info, struct cfg80211_crypto_settings *settings, int cipher_limit) { memset(settings, 0, sizeof(*settings)); settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT]; if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { u16 proto; proto = nla_get_u16( info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); settings->control_port_ethertype = cpu_to_be16(proto); if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && proto != ETH_P_PAE) return -EINVAL; if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT]) settings->control_port_no_encrypt = true; } else settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) return r; settings->control_port_over_nl80211 = true; if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_PREAUTH]) settings->control_port_no_preauth = true; } if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) { void *data; int len, i; data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]); settings->n_ciphers_pairwise = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; if (settings->n_ciphers_pairwise > cipher_limit) return -EINVAL; memcpy(settings->ciphers_pairwise, data, len); for (i = 0; i < settings->n_ciphers_pairwise; i++) if (!cfg80211_supported_cipher_suite( &rdev->wiphy, 
settings->ciphers_pairwise[i])) return -EINVAL; } if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) { settings->cipher_group = nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]); if (!cfg80211_supported_cipher_suite(&rdev->wiphy, settings->cipher_group)) return -EINVAL; } if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) { settings->wpa_versions = nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]); if (!nl80211_valid_wpa_versions(settings->wpa_versions)) return -EINVAL; } if (info->attrs[NL80211_ATTR_AKM_SUITES]) { void *data; int len; data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]); len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]); settings->n_akm_suites = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; if (settings->n_akm_suites > rdev->wiphy.max_num_akm_suites) return -EINVAL; memcpy(settings->akm_suites, data, len); } if (info->attrs[NL80211_ATTR_PMK]) { if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK)) return -EINVAL; settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]); } if (info->attrs[NL80211_ATTR_SAE_PASSWORD]) { if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD_AP)) return -EINVAL; settings->sae_pwd = nla_data(info->attrs[NL80211_ATTR_SAE_PASSWORD]); settings->sae_pwd_len = nla_len(info->attrs[NL80211_ATTR_SAE_PASSWORD]); } if (info->attrs[NL80211_ATTR_SAE_PWE]) settings->sae_pwe = nla_get_u8(info->attrs[NL80211_ATTR_SAE_PWE]); else settings->sae_pwe = NL80211_SAE_PWE_UNSPECIFIED; return 0; } static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev, const u8 *ssid, int ssid_len, struct nlattr **attrs) { struct ieee80211_channel *chan; struct cfg80211_bss *bss; const u8 *bssid; u32 freq; if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_WIPHY_FREQ]) return ERR_PTR(-EINVAL); bssid = nla_data(attrs[NL80211_ATTR_MAC]); freq = MHZ_TO_KHZ(nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ])); if (attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]) freq += nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]); chan = nl80211_get_valid_chan(&rdev->wiphy, freq); if (!chan) return ERR_PTR(-EINVAL); bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); if (!bss) return ERR_PTR(-ENOENT); return bss; } static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_assoc_request req = {}; struct nlattr **attrs = NULL; const u8 *ap_addr, *ssid; unsigned int link_id; int err, ssid_len; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; if (!info->attrs[NL80211_ATTR_SSID]) return -EINVAL; if (!rdev->ops->assoc) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { req.ie = nla_data(info->attrs[NL80211_ATTR_IE]); req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, req.ie, req.ie_len)) { GENL_SET_ERR_MSG(info, 
"non-inheritance makes no sense"); return -EINVAL; } } if (info->attrs[NL80211_ATTR_USE_MFP]) { enum nl80211_mfp mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); if (mfp == NL80211_MFP_REQUIRED) req.use_mfp = true; else if (mfp != NL80211_MFP_NO) return -EINVAL; } if (info->attrs[NL80211_ATTR_PREV_BSSID]) req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) req.flags |= ASSOC_REQ_DISABLE_HT; if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&req.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(req.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) return -EINVAL; memcpy(&req.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(req.ht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) req.flags |= ASSOC_REQ_DISABLE_VHT; if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) req.flags |= ASSOC_REQ_DISABLE_HE; if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT])) req.flags |= ASSOC_REQ_DISABLE_EHT; if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&req.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), sizeof(req.vht_capa_mask)); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) return -EINVAL; memcpy(&req.vht_capa, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]), sizeof(req.vht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { if (!((rdev->wiphy.features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) && (rdev->wiphy.features & NL80211_FEATURE_QUIET)) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RRM)) return -EINVAL; req.flags |= ASSOC_REQ_USE_RRM; } if (info->attrs[NL80211_ATTR_FILS_KEK]) { req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]); req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]); if (!info->attrs[NL80211_ATTR_FILS_NONCES]) return -EINVAL; req.fils_nonces = nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]); } if (info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK]) { if (!info->attrs[NL80211_ATTR_S1G_CAPABILITY]) return -EINVAL; memcpy(&req.s1g_capa_mask, nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK]), sizeof(req.s1g_capa_mask)); } if (info->attrs[NL80211_ATTR_S1G_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK]) return -EINVAL; memcpy(&req.s1g_capa, nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]), sizeof(req.s1g_capa)); } req.link_id = nl80211_link_id_or_invalid(info->attrs); if (info->attrs[NL80211_ATTR_MLO_LINKS]) { unsigned int attrsize = NUM_NL80211_ATTR * sizeof(*attrs); struct nlattr *link; int rem = 0; if (req.link_id < 0) return -EINVAL; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (info->attrs[NL80211_ATTR_MAC] || info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_MLD_ADDR]) return -EINVAL; req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); ap_addr = req.ap_mld_addr; attrs = kzalloc(attrsize, GFP_KERNEL); if (!attrs) return -ENOMEM; nla_for_each_nested(link, info->attrs[NL80211_ATTR_MLO_LINKS], rem) { memset(attrs, 0, attrsize); nla_parse_nested(attrs, NL80211_ATTR_MAX, link, NULL, NULL); if (!attrs[NL80211_ATTR_MLO_LINK_ID]) { err = -EINVAL; goto free; } link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]); /* cannot use the same link ID again */ if (req.links[link_id].bss) { err = -EINVAL; goto free; } req.links[link_id].bss = nl80211_assoc_bss(rdev, ssid, 
ssid_len, attrs); if (IS_ERR(req.links[link_id].bss)) { err = PTR_ERR(req.links[link_id].bss); req.links[link_id].bss = NULL; goto free; } if (attrs[NL80211_ATTR_IE]) { req.links[link_id].elems = nla_data(attrs[NL80211_ATTR_IE]); req.links[link_id].elems_len = nla_len(attrs[NL80211_ATTR_IE]); if (cfg80211_find_elem(WLAN_EID_FRAGMENT, req.links[link_id].elems, req.links[link_id].elems_len)) { GENL_SET_ERR_MSG(info, "cannot deal with fragmentation"); err = -EINVAL; goto free; } if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, req.links[link_id].elems, req.links[link_id].elems_len)) { GENL_SET_ERR_MSG(info, "cannot deal with non-inheritance"); err = -EINVAL; goto free; } } req.links[link_id].disabled = nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]); } if (!req.links[req.link_id].bss) { err = -EINVAL; goto free; } if (req.links[req.link_id].elems_len) { GENL_SET_ERR_MSG(info, "cannot have per-link elems on assoc link"); err = -EINVAL; goto free; } if (req.links[req.link_id].disabled) { GENL_SET_ERR_MSG(info, "cannot have assoc link disabled"); err = -EINVAL; goto free; } kfree(attrs); attrs = NULL; } else { if (req.link_id >= 0) return -EINVAL; req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs); if (IS_ERR(req.bss)) return PTR_ERR(req.bss); ap_addr = req.bss->bssid; } err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); if (!err) { wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_assoc(rdev, dev, &req); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; memcpy(dev->ieee80211_ptr->disconnect_bssid, ap_addr, ETH_ALEN); } wdev_unlock(dev->ieee80211_ptr); } free: for (link_id = 0; link_id < ARRAY_SIZE(req.links); link_id++) cfg80211_put_bss(&rdev->wiphy, req.links[link_id].bss); cfg80211_put_bss(&rdev->wiphy, req.bss); kfree(attrs); return err; } static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; int ie_len = 0, err; u16 reason_code; bool local_state_change; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REASON_CODE]) return -EINVAL; if (!rdev->ops->deauth) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason_code == 0) { /* Reason Code 0 is reserved */ return -EINVAL; } if (info->attrs[NL80211_ATTR_IE]) { ie = nla_data(info->attrs[NL80211_ATTR_IE]); ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, local_state_change); wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; int ie_len = 0, err; u16 reason_code; bool local_state_change; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; if (!info->attrs[NL80211_ATTR_MAC]) 
return -EINVAL; if (!info->attrs[NL80211_ATTR_REASON_CODE]) return -EINVAL; if (!rdev->ops->disassoc) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason_code == 0) { /* Reason Code 0 is reserved */ return -EINVAL; } if (info->attrs[NL80211_ATTR_IE]) { ie = nla_data(info->attrs[NL80211_ATTR_IE]); ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE]; wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, local_state_change); wdev_unlock(dev->ieee80211_ptr); return err; } static bool nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev, int mcast_rate[NUM_NL80211_BANDS], int rateval) { struct wiphy *wiphy = &rdev->wiphy; bool found = false; int band, i; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == rateval) { mcast_rate[band] = i + 1; found = true; break; } } } return found; } static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_ibss_params ibss; struct wiphy *wiphy; struct cfg80211_cached_keys *connkeys = NULL; int err; memset(&ibss, 0, sizeof(ibss)); if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; ibss.beacon_interval = 100; if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) ibss.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC, ibss.beacon_interval); if (err) return err; if (!rdev->ops->join_ibss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; wiphy = &rdev->wiphy; if (info->attrs[NL80211_ATTR_MAC]) { ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!is_valid_ether_addr(ibss.bssid)) return -EINVAL; } ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]); ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } err = nl80211_parse_chandef(rdev, info, &ibss.chandef); if (err) return err; if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef, NL80211_IFTYPE_ADHOC)) return -EINVAL; switch (ibss.chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: break; case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) return -EINVAL; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: if (!(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_VHT_IBSS)) return -EINVAL; break; case NL80211_CHAN_WIDTH_320: return -EINVAL; default: return -EINVAL; } ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED]; ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); int n_rates = 
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); struct ieee80211_supported_band *sband = wiphy->bands[ibss.chandef.chan->band]; err = ieee80211_get_ratemask(sband, rates, n_rates, &ibss.basic_rates); if (err) return err; } if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&ibss.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(ibss.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) return -EINVAL; memcpy(&ibss.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(ibss.ht_capa)); } if (info->attrs[NL80211_ATTR_MCAST_RATE] && !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate, nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) return -EINVAL; if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { bool no_ht = false; connkeys = nl80211_parse_connkeys(rdev, info, &no_ht); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) && no_ht) { kfree_sensitive(connkeys); return -EINVAL; } } ibss.control_port = nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) { kfree_sensitive(connkeys); return r; } ibss.control_port_over_nl80211 = true; } ibss.userspace_handles_dfs = nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]); wdev_lock(dev->ieee80211_ptr); err = __cfg80211_join_ibss(rdev, dev, &ibss, connkeys); if (err) kfree_sensitive(connkeys); else if (info->attrs[NL80211_ATTR_SOCKET_OWNER]) dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->leave_ibss) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; return cfg80211_leave_ibss(rdev, dev, false); } static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; int mcast_rate[NUM_NL80211_BANDS]; u32 nla_rate; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) return -EOPNOTSUPP; if (!rdev->ops->set_mcast_rate) return -EOPNOTSUPP; memset(mcast_rate, 0, sizeof(mcast_rate)); if (!info->attrs[NL80211_ATTR_MCAST_RATE]) return -EINVAL; nla_rate = nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]); if (!nl80211_parse_mcast_rate(rdev, mcast_rate, nla_rate)) return -EINVAL; return rdev_set_mcast_rate(rdev, dev, mcast_rate); } static struct sk_buff * __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int approxlen, u32 portid, u32 seq, enum nl80211_commands cmd, enum nl80211_attrs attr, const struct nl80211_vendor_cmd_info *info, gfp_t gfp) { struct sk_buff *skb; void *hdr; struct nlattr *data; skb = nlmsg_new(approxlen + 100, gfp); if (!skb) return NULL; hdr = nl80211hdr_put(skb, portid, seq, 0, cmd); if (!hdr) { kfree_skb(skb); return NULL; } if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; if (info) { if (nla_put_u32(skb, NL80211_ATTR_VENDOR_ID, info->vendor_id)) goto nla_put_failure; if (nla_put_u32(skb, NL80211_ATTR_VENDOR_SUBCMD, info->subcmd)) goto nla_put_failure; } if (wdev) { 
if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (wdev->netdev && nla_put_u32(skb, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto nla_put_failure; } data = nla_nest_start_noflag(skb, attr); if (!data) goto nla_put_failure; ((void **)skb->cb)[0] = rdev; ((void **)skb->cb)[1] = hdr; ((void **)skb->cb)[2] = data; return skb; nla_put_failure: kfree_skb(skb); return NULL; } struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_commands cmd, enum nl80211_attrs attr, unsigned int portid, int vendor_event_idx, int approxlen, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct nl80211_vendor_cmd_info *info; switch (cmd) { case NL80211_CMD_TESTMODE: if (WARN_ON(vendor_event_idx != -1)) return NULL; info = NULL; break; case NL80211_CMD_VENDOR: if (WARN_ON(vendor_event_idx < 0 || vendor_event_idx >= wiphy->n_vendor_events)) return NULL; info = &wiphy->vendor_events[vendor_event_idx]; break; default: WARN_ON(1); return NULL; } return __cfg80211_alloc_vendor_skb(rdev, wdev, approxlen, portid, 0, cmd, attr, info, gfp); } EXPORT_SYMBOL(__cfg80211_alloc_event_skb); void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp) { struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlmsghdr *nlhdr = nlmsg_hdr(skb); struct nlattr *data = ((void **)skb->cb)[2]; enum nl80211_multicast_groups mcgrp = NL80211_MCGRP_TESTMODE; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); nla_nest_end(skb, data); genlmsg_end(skb, hdr); if (nlhdr->nlmsg_pid) { genlmsg_unicast(wiphy_net(&rdev->wiphy), skb, nlhdr->nlmsg_pid); } else { if (data->nla_type == NL80211_ATTR_VENDOR_DATA) mcgrp = NL80211_MCGRP_VENDOR; genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), skb, 0, mcgrp, gfp); } } EXPORT_SYMBOL(__cfg80211_send_event_skb); #ifdef CONFIG_NL80211_TESTMODE static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev; int err; lockdep_assert_held(&rdev->wiphy.mtx); wdev = __cfg80211_wdev_from_attrs(rdev, genl_info_net(info), info->attrs); if (!rdev->ops->testmode_cmd) return -EOPNOTSUPP; if (IS_ERR(wdev)) { err = PTR_ERR(wdev); if (err != -EINVAL) return err; wdev = NULL; } else if (wdev->wiphy != &rdev->wiphy) { return -EINVAL; } if (!info->attrs[NL80211_ATTR_TESTDATA]) return -EINVAL; rdev->cur_cmd_info = info; err = rdev_testmode_cmd(rdev, wdev, nla_data(info->attrs[NL80211_ATTR_TESTDATA]), nla_len(info->attrs[NL80211_ATTR_TESTDATA])); rdev->cur_cmd_info = NULL; return err; } static int nl80211_testmode_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; struct nlattr **attrbuf = NULL; int err; long phy_idx; void *data = NULL; int data_len = 0; rtnl_lock(); if (cb->args[0]) { /* * 0 is a valid index, but not valid for args[0], * so we need to offset by 1. 
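	 * (The value stored in cb->args[0] at the end of this dump is
	 * therefore wiphy_idx + 1.)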
*/ phy_idx = cb->args[0] - 1; rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!rdev) { err = -ENOENT; goto out_err; } } else { attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); if (!attrbuf) { err = -ENOMEM; goto out_err; } err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) goto out_err; rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(rdev)) { err = PTR_ERR(rdev); goto out_err; } phy_idx = rdev->wiphy_idx; if (attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = (long)attrbuf[NL80211_ATTR_TESTDATA]; } if (cb->args[1]) { data = nla_data((void *)cb->args[1]); data_len = nla_len((void *)cb->args[1]); } if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; goto out_err; } while (1) { void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, NL80211_CMD_TESTMODE); struct nlattr *tmdata; if (!hdr) break; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { genlmsg_cancel(skb, hdr); break; } tmdata = nla_nest_start_noflag(skb, NL80211_ATTR_TESTDATA); if (!tmdata) { genlmsg_cancel(skb, hdr); break; } err = rdev_testmode_dump(rdev, skb, cb, data, data_len); nla_nest_end(skb, tmdata); if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; } else if (err) { genlmsg_cancel(skb, hdr); goto out_err; } genlmsg_end(skb, hdr); } err = skb->len; /* see above */ cb->args[0] = phy_idx + 1; out_err: kfree(attrbuf); rtnl_unlock(); return err; } #endif static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_connect_params connect; struct wiphy *wiphy; struct cfg80211_cached_keys *connkeys = NULL; u32 freq = 0; int err; memset(&connect, 0, sizeof(connect)); if (!info->attrs[NL80211_ATTR_SSID] || !nla_len(info->attrs[NL80211_ATTR_SSID])) return -EINVAL; if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { connect.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, connect.auth_type, NL80211_CMD_CONNECT)) return -EINVAL; } else connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; connect.privacy = info->attrs[NL80211_ATTR_PRIVACY]; if (info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS] && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) return -EINVAL; connect.want_1x = info->attrs[NL80211_ATTR_WANT_1X_4WAY_HS]; err = nl80211_crypto_settings(rdev, info, &connect.crypto, NL80211_MAX_NR_CIPHER_SUITES); if (err) return err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wiphy = &rdev->wiphy; connect.bg_scan_period = -1; if (info->attrs[NL80211_ATTR_BG_SCAN_PERIOD] && (wiphy->flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)) { connect.bg_scan_period = nla_get_u16(info->attrs[NL80211_ATTR_BG_SCAN_PERIOD]); } if (info->attrs[NL80211_ATTR_MAC]) connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); else if (info->attrs[NL80211_ATTR_MAC_HINT]) connect.bssid_hint = nla_data(info->attrs[NL80211_ATTR_MAC_HINT]); connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } if (info->attrs[NL80211_ATTR_USE_MFP]) { connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); if (connect.mfp 
== NL80211_MFP_OPTIONAL && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_MFP_OPTIONAL)) return -EOPNOTSUPP; } else { connect.mfp = NL80211_MFP_NO; } if (info->attrs[NL80211_ATTR_PREV_BSSID]) connect.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) freq = MHZ_TO_KHZ(nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_FREQ])); if (info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]) freq += nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ_OFFSET]); if (freq) { connect.channel = nl80211_get_valid_chan(wiphy, freq); if (!connect.channel) return -EINVAL; } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) { freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]); freq = MHZ_TO_KHZ(freq); connect.channel_hint = nl80211_get_valid_chan(wiphy, freq); if (!connect.channel_hint) return -EINVAL; } if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) { connect.edmg.channels = nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]); if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]) connect.edmg.bw_config = nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]); } if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { connkeys = nl80211_parse_connkeys(rdev, info, NULL); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) connect.flags |= ASSOC_REQ_DISABLE_HT; if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) memcpy(&connect.ht_capa_mask, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), sizeof(connect.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { kfree_sensitive(connkeys); return -EINVAL; } memcpy(&connect.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(connect.ht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT])) connect.flags |= ASSOC_REQ_DISABLE_VHT; if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HE])) connect.flags |= ASSOC_REQ_DISABLE_HE; if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_EHT])) connect.flags |= ASSOC_REQ_DISABLE_EHT; if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) memcpy(&connect.vht_capa_mask, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]), sizeof(connect.vht_capa_mask)); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) { if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) { kfree_sensitive(connkeys); return -EINVAL; } memcpy(&connect.vht_capa, nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]), sizeof(connect.vht_capa)); } if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { if (!((rdev->wiphy.features & NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) && (rdev->wiphy.features & NL80211_FEATURE_QUIET)) && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_RRM)) { kfree_sensitive(connkeys); return -EINVAL; } connect.flags |= ASSOC_REQ_USE_RRM; } connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); if (connect.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { kfree_sensitive(connkeys); return -EOPNOTSUPP; } if (info->attrs[NL80211_ATTR_BSS_SELECT]) { /* bss selection makes no sense if bssid is set */ if (connect.bssid) { kfree_sensitive(connkeys); return -EINVAL; } err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT], wiphy, &connect.bss_select); if (err) { kfree_sensitive(connkeys); return err; } } if (wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && info->attrs[NL80211_ATTR_FILS_ERP_REALM] && info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && 
info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { connect.fils_erp_username = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_username_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_realm = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_realm_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_next_seq_num = nla_get_u16( info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); connect.fils_erp_rrk = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); connect.fils_erp_rrk_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || info->attrs[NL80211_ATTR_FILS_ERP_REALM] || info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { kfree_sensitive(connkeys); return -EINVAL; } if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { kfree_sensitive(connkeys); GENL_SET_ERR_MSG(info, "external auth requires connection ownership"); return -EINVAL; } connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT; } if (nla_get_flag(info->attrs[NL80211_ATTR_MLO_SUPPORT])) connect.flags |= CONNECT_REQ_MLO_SUPPORT; wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, connect.prev_bssid); if (err) kfree_sensitive(connkeys); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) { dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; if (connect.bssid) memcpy(dev->ieee80211_ptr->disconnect_bssid, connect.bssid, ETH_ALEN); else eth_zero_addr(dev->ieee80211_ptr->disconnect_bssid); } wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_update_connect_params(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_connect_params connect = {}; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; bool fils_sk_offload; u32 auth_type; u32 changed = 0; int ret; if (!rdev->ops->update_connect_params) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_IE]) { connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); changed |= UPDATE_ASSOC_IES; } fils_sk_offload = wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD); /* * when driver supports fils-sk offload all attributes must be * provided. So the else covers "fils-sk-not-all" and * "no-fils-sk-any". 
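	 * Either way, an incomplete set of FILS ERP attributes, or one
	 * supplied without offload support, is rejected with -EINVAL below.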
*/ if (fils_sk_offload && info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && info->attrs[NL80211_ATTR_FILS_ERP_REALM] && info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { connect.fils_erp_username = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_username_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); connect.fils_erp_realm = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_realm_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); connect.fils_erp_next_seq_num = nla_get_u16( info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); connect.fils_erp_rrk = nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); connect.fils_erp_rrk_len = nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); changed |= UPDATE_FILS_ERP_INFO; } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || info->attrs[NL80211_ATTR_FILS_ERP_REALM] || info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { return -EINVAL; } if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_CONNECT)) return -EINVAL; if (auth_type == NL80211_AUTHTYPE_FILS_SK && fils_sk_offload && !(changed & UPDATE_FILS_ERP_INFO)) return -EINVAL; connect.auth_type = auth_type; changed |= UPDATE_AUTH_TYPE; } wdev_lock(dev->ieee80211_ptr); if (!wdev->connected) ret = -ENOLINK; else ret = rdev_update_connect_params(rdev, dev, &connect, changed); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 reason; int ret; if (dev->ieee80211_ptr->conn_owner_nlportid && dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid) return -EPERM; if (!info->attrs[NL80211_ATTR_REASON_CODE]) reason = WLAN_REASON_DEAUTH_LEAVING; else reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason == 0) return -EINVAL; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wdev_lock(dev->ieee80211_ptr); ret = cfg80211_disconnect(rdev, dev, reason, true); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net *net; int err; if (info->attrs[NL80211_ATTR_PID]) { u32 pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); net = get_net_ns_by_pid(pid); } else if (info->attrs[NL80211_ATTR_NETNS_FD]) { u32 fd = nla_get_u32(info->attrs[NL80211_ATTR_NETNS_FD]); net = get_net_ns_by_fd(fd); } else { return -EINVAL; } if (IS_ERR(net)) return PTR_ERR(net); err = 0; /* check if anything to do */ if (!net_eq(wiphy_net(&rdev->wiphy), net)) err = cfg80211_switch_netns(rdev, net); put_net(net); return err; } static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int (*rdev_ops)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa) = NULL; struct net_device *dev = info->user_ptr[1]; struct cfg80211_pmksa pmksa; memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); if (!info->attrs[NL80211_ATTR_PMKID]) return -EINVAL; pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); if (info->attrs[NL80211_ATTR_MAC]) { pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); } 
else if (info->attrs[NL80211_ATTR_SSID] && info->attrs[NL80211_ATTR_FILS_CACHE_ID] && (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA || info->attrs[NL80211_ATTR_PMK])) { pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); pmksa.cache_id = nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]); } else { return -EINVAL; } if (info->attrs[NL80211_ATTR_PMK]) { pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]); pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]); } if (info->attrs[NL80211_ATTR_PMK_LIFETIME]) pmksa.pmk_lifetime = nla_get_u32(info->attrs[NL80211_ATTR_PMK_LIFETIME]); if (info->attrs[NL80211_ATTR_PMK_REAUTH_THRESHOLD]) pmksa.pmk_reauth_threshold = nla_get_u8( info->attrs[NL80211_ATTR_PMK_REAUTH_THRESHOLD]); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && !(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP && wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_AP_PMKSA_CACHING))) return -EOPNOTSUPP; switch (info->genlhdr->cmd) { case NL80211_CMD_SET_PMKSA: rdev_ops = rdev->ops->set_pmksa; break; case NL80211_CMD_DEL_PMKSA: rdev_ops = rdev->ops->del_pmksa; break; default: WARN_ON(1); break; } if (!rdev_ops) return -EOPNOTSUPP; return rdev_ops(&rdev->wiphy, dev, &pmksa); } static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!rdev->ops->flush_pmksa) return -EOPNOTSUPP; return rdev_flush_pmksa(rdev, dev); } static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u8 action_code, dialog_token; u32 peer_capability = 0; u16 status_code; u8 *peer; int link_id; bool initiator; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) || !rdev->ops->tdls_mgmt) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TDLS_ACTION] || !info->attrs[NL80211_ATTR_STATUS_CODE] || !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] || !info->attrs[NL80211_ATTR_IE] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; peer = nla_data(info->attrs[NL80211_ATTR_MAC]); action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]); status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]); initiator = nla_get_flag(info->attrs[NL80211_ATTR_TDLS_INITIATOR]); if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]) peer_capability = nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]); link_id = nl80211_link_id_or_invalid(info->attrs); return rdev_tdls_mgmt(rdev, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, nla_data(info->attrs[NL80211_ATTR_IE]), nla_len(info->attrs[NL80211_ATTR_IE])); } static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; enum nl80211_tdls_operation operation; u8 *peer; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) || !rdev->ops->tdls_oper) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]); peer = 
nla_data(info->attrs[NL80211_ATTR_MAC]); return rdev_tdls_oper(rdev, dev, peer, operation); } static int nl80211_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_chan_def chandef; struct sk_buff *msg; void *hdr; u64 cookie; u32 duration; int err; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_DURATION]) return -EINVAL; duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); if (!rdev->ops->remain_on_channel || !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) return -EOPNOTSUPP; /* * We should be on that channel for at least a minimum amount of * time (10ms) but no longer than the driver supports. */ if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || duration > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; wdev_lock(wdev); if (!cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) { const struct cfg80211_chan_def *oper_chandef, *compat_chandef; oper_chandef = wdev_chandef(wdev, link_id); if (WARN_ON(!oper_chandef)) { /* cannot happen since we must beacon to get here */ WARN_ON(1); wdev_unlock(wdev); return -EBUSY; } /* note: returns first one if identical chandefs */ compat_chandef = cfg80211_chandef_compatible(&chandef, oper_chandef); if (compat_chandef != &chandef) { wdev_unlock(wdev); return -EBUSY; } } wdev_unlock(wdev); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_REMAIN_ON_CHANNEL); if (!hdr) { err = -ENOBUFS; goto free_msg; } err = rdev_remain_on_channel(rdev, wdev, chandef.chan, duration, &cookie); if (err) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_cancel_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; if (!rdev->ops->cancel_remain_on_channel) return -EOPNOTSUPP; cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return rdev_cancel_remain_on_channel(rdev, wdev, cookie); } static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_bitrate_mask mask; unsigned int link_id = nl80211_link_id(info->attrs); struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; int err; if (!rdev->ops->set_bitrate_mask) return -EOPNOTSUPP; wdev_lock(wdev); err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, &mask, dev, true, link_id); if (err) goto out; err = rdev_set_bitrate_mask(rdev, dev, link_id, NULL, &mask); out: wdev_unlock(wdev); return err; } static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION; if (!info->attrs[NL80211_ATTR_FRAME_MATCH]) return -EINVAL; if (info->attrs[NL80211_ATTR_FRAME_TYPE]) frame_type = 
nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, NL80211_EXT_FEATURE_SECURE_NAN)) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } /* not much point in registering if we can't reply */ if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_RECEIVE_MULTICAST] && !wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS)) { GENL_SET_ERR_MSG(info, "multicast RX registrations are not supported"); return -EOPNOTSUPP; } return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type, nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]), info->attrs[NL80211_ATTR_RECEIVE_MULTICAST], info->extack); } static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_chan_def chandef; int err; void *hdr = NULL; u64 cookie; struct sk_buff *msg = NULL; struct cfg80211_mgmt_tx_params params = { .dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK], }; if (!info->attrs[NL80211_ATTR_FRAME]) return -EINVAL; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, NL80211_EXT_FEATURE_SECURE_NAN)) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } if (info->attrs[NL80211_ATTR_DURATION]) { if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) return -EINVAL; params.wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); /* * We should wait on the channel for at least a minimum amount * of time (10ms) but no longer than the driver supports. */ if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || params.wait > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; } params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; if (params.offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) return -EINVAL; params.no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); /* get the channel if any has been specified, otherwise pass NULL to * the driver. The latter will use the current one */ chandef.chan = NULL; if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { err = nl80211_parse_chandef(rdev, info, &chandef); if (err) return err; } if (!chandef.chan && params.offchan) return -EINVAL; wdev_lock(wdev); if (params.offchan && !cfg80211_off_channel_oper_allowed(wdev, chandef.chan)) { wdev_unlock(wdev); return -EBUSY; } params.link_id = nl80211_link_id_or_invalid(info->attrs); /* * This now races due to the unlock, but we cannot check * the valid links for the _station_ anyway, so that's up * to the driver. 
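	 * (The valid_links check below is done under the lock, but the set
	 * of links may change again once the lock is dropped.)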
*/ if (params.link_id >= 0 && !(wdev->valid_links & BIT(params.link_id))) { wdev_unlock(wdev); return -EINVAL; } wdev_unlock(wdev); params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]); if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) { int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]); int i; if (len % sizeof(u16)) return -EINVAL; params.n_csa_offsets = len / sizeof(u16); params.csa_offsets = nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]); /* check that all the offsets fit the frame */ for (i = 0; i < params.n_csa_offsets; i++) { if (params.csa_offsets[i] >= params.len) return -EINVAL; } } if (!params.dont_wait_for_ack) { msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_FRAME); if (!hdr) { err = -ENOBUFS; goto free_msg; } } params.chan = chandef.chan; err = cfg80211_mlme_mgmt_tx(rdev, wdev, &params, &cookie); if (err) goto free_msg; if (msg) { if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); } return 0; nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; if (!rdev->ops->mgmt_tx_cancel_wait) return -EOPNOTSUPP; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, NL80211_EXT_FEATURE_SECURE_NAN)) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); return rdev_mgmt_tx_cancel_wait(rdev, wdev, cookie); } static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; u8 ps_state; bool state; int err; if (!info->attrs[NL80211_ATTR_PS_STATE]) return -EINVAL; ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]); wdev = dev->ieee80211_ptr; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; state = (ps_state == NL80211_PS_ENABLED) ? 
true : false; if (state == wdev->ps) return 0; err = rdev_set_power_mgmt(rdev, dev, state, wdev->ps_timeout); if (!err) wdev->ps = state; return err; } static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; enum nl80211_ps_state ps_state; struct wireless_dev *wdev; struct net_device *dev = info->user_ptr[1]; struct sk_buff *msg; void *hdr; int err; wdev = dev->ieee80211_ptr; if (!rdev->ops->set_power_mgmt) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_POWER_SAVE); if (!hdr) { err = -ENOBUFS; goto free_msg; } if (wdev->ps) ps_state = NL80211_PS_ENABLED; else ps_state = NL80211_PS_DISABLED; if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static const struct nla_policy nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = { [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_BINARY }, [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 }, [NL80211_ATTR_CQM_RSSI_LEVEL] = { .type = NLA_S32 }, }; static int nl80211_set_cqm_txe(struct genl_info *info, u32 rate, u32 pkts, u32 intvl) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL) return -EINVAL; if (!rdev->ops->set_cqm_txe_config) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl); } static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; s32 last, low, high; u32 hyst; int i, n, low_index; int err; /* RSSI reporting disabled? */ if (!wdev->cqm_config) return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0); /* * Obtain current RSSI value if possible, if not and no RSSI threshold * event has been received yet, we should receive an event after a * connection is established and enough beacons received to calculate * the average. 
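	 * If the driver reports a beacon signal average, it is used to seed
	 * last_rssi_event_value here.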
*/ if (!wdev->cqm_config->last_rssi_event_value && wdev->links[0].client.current_bss && rdev->ops->get_station) { struct station_info sinfo = {}; u8 *mac_addr; mac_addr = wdev->links[0].client.current_bss->pub.bssid; err = rdev_get_station(rdev, dev, mac_addr, &sinfo); if (err) return err; cfg80211_sinfo_release_content(&sinfo); if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG)) wdev->cqm_config->last_rssi_event_value = (s8) sinfo.rx_beacon_signal_avg; } last = wdev->cqm_config->last_rssi_event_value; hyst = wdev->cqm_config->rssi_hyst; n = wdev->cqm_config->n_rssi_thresholds; for (i = 0; i < n; i++) { i = array_index_nospec(i, n); if (last < wdev->cqm_config->rssi_thresholds[i]) break; } low_index = i - 1; if (low_index >= 0) { low_index = array_index_nospec(low_index, n); low = wdev->cqm_config->rssi_thresholds[low_index] - hyst; } else { low = S32_MIN; } if (i < n) { i = array_index_nospec(i, n); high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1; } else { high = S32_MAX; } return rdev_set_cqm_rssi_range_config(rdev, dev, low, high); } static int nl80211_set_cqm_rssi(struct genl_info *info, const s32 *thresholds, int n_thresholds, u32 hysteresis) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; int i, err; s32 prev = S32_MIN; /* Check all values negative and sorted */ for (i = 0; i < n_thresholds; i++) { if (thresholds[i] > 0 || thresholds[i] <= prev) return -EINVAL; prev = thresholds[i]; } if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; wdev_lock(wdev); cfg80211_cqm_config_free(wdev); wdev_unlock(wdev); if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) { if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */ return rdev_set_cqm_rssi_config(rdev, dev, 0, 0); return rdev_set_cqm_rssi_config(rdev, dev, thresholds[0], hysteresis); } if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST)) return -EOPNOTSUPP; if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */ n_thresholds = 0; wdev_lock(wdev); if (n_thresholds) { struct cfg80211_cqm_config *cqm_config; cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds, n_thresholds), GFP_KERNEL); if (!cqm_config) { err = -ENOMEM; goto unlock; } cqm_config->rssi_hyst = hysteresis; cqm_config->n_rssi_thresholds = n_thresholds; memcpy(cqm_config->rssi_thresholds, thresholds, flex_array_size(cqm_config, rssi_thresholds, n_thresholds)); wdev->cqm_config = cqm_config; } err = cfg80211_cqm_rssi_update(rdev, dev); unlock: wdev_unlock(wdev); return err; } static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1]; struct nlattr *cqm; int err; cqm = info->attrs[NL80211_ATTR_CQM]; if (!cqm) return -EINVAL; err = nla_parse_nested_deprecated(attrs, NL80211_ATTR_CQM_MAX, cqm, nl80211_attr_cqm_policy, info->extack); if (err) return err; if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] && attrs[NL80211_ATTR_CQM_RSSI_HYST]) { const s32 *thresholds = nla_data(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); int len = nla_len(attrs[NL80211_ATTR_CQM_RSSI_THOLD]); u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]); if (len % 4) return -EINVAL; return nl80211_set_cqm_rssi(info, thresholds, len / 4, hysteresis); } if (attrs[NL80211_ATTR_CQM_TXE_RATE] && attrs[NL80211_ATTR_CQM_TXE_PKTS] && attrs[NL80211_ATTR_CQM_TXE_INTVL]) { u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]); u32 pkts = 
nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]); u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]); return nl80211_set_cqm_txe(info, rate, pkts, intvl); } return -EINVAL; } static int nl80211_join_ocb(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ocb_setup setup = {}; int err; err = nl80211_parse_chandef(rdev, info, &setup.chandef); if (err) return err; return cfg80211_join_ocb(rdev, dev, &setup); } static int nl80211_leave_ocb(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; return cfg80211_leave_ocb(rdev, dev); } static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct mesh_config cfg; struct mesh_setup setup; int err; /* start with default */ memcpy(&cfg, &default_mesh_config, sizeof(cfg)); memcpy(&setup, &default_mesh_setup, sizeof(setup)); if (info->attrs[NL80211_ATTR_MESH_CONFIG]) { /* and parse parameters if given */ err = nl80211_parse_mesh_config(info, &cfg, NULL); if (err) return err; } if (!info->attrs[NL80211_ATTR_MESH_ID] || !nla_len(info->attrs[NL80211_ATTR_MESH_ID])) return -EINVAL; setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); if (info->attrs[NL80211_ATTR_MCAST_RATE] && !nl80211_parse_mcast_rate(rdev, setup.mcast_rate, nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE]))) return -EINVAL; if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { setup.beacon_interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_MESH_POINT, setup.beacon_interval); if (err) return err; } if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) { setup.dtim_period = nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); if (setup.dtim_period < 1 || setup.dtim_period > 100) return -EINVAL; } if (info->attrs[NL80211_ATTR_MESH_SETUP]) { /* parse additional setup parameters if given */ err = nl80211_parse_mesh_setup(info, &setup); if (err) return err; } if (setup.user_mpm) cfg.auto_open_plinks = false; if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { err = nl80211_parse_chandef(rdev, info, &setup.chandef); if (err) return err; } else { /* __cfg80211_join_mesh() will sort it out */ setup.chandef.chan = NULL; } if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); int n_rates = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); struct ieee80211_supported_band *sband; if (!setup.chandef.chan) return -EINVAL; sband = rdev->wiphy.bands[setup.chandef.chan->band]; err = ieee80211_get_ratemask(sband, rates, n_rates, &setup.basic_rates); if (err) return err; } if (info->attrs[NL80211_ATTR_TX_RATES]) { err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, &setup.beacon_rate, dev, false, 0); if (err) return err; if (!setup.chandef.chan) return -EINVAL; err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band, &setup.beacon_rate); if (err) return err; } setup.userspace_handles_dfs = nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]); if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) { int r = validate_pae_over_nl80211(rdev, info); if (r < 0) return r; setup.control_port_over_nl80211 = true; } wdev_lock(dev->ieee80211_ptr); err = __cfg80211_join_mesh(rdev, 
dev, &setup, &cfg); if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; wdev_unlock(dev->ieee80211_ptr); return err; } static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; return cfg80211_leave_mesh(rdev, dev); } #ifdef CONFIG_PM static int nl80211_send_wowlan_patterns(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config; struct nlattr *nl_pats, *nl_pat; int i, pat_len; if (!wowlan->n_patterns) return 0; nl_pats = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; for (i = 0; i < wowlan->n_patterns; i++) { nl_pat = nla_nest_start_noflag(msg, i + 1); if (!nl_pat) return -ENOBUFS; pat_len = wowlan->patterns[i].pattern_len; if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), wowlan->patterns[i].mask) || nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, wowlan->patterns[i].pattern) || nla_put_u32(msg, NL80211_PKTPAT_OFFSET, wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); return 0; } static int nl80211_send_wowlan_tcp(struct sk_buff *msg, struct cfg80211_wowlan_tcp *tcp) { struct nlattr *nl_tcp; if (!tcp) return 0; nl_tcp = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION); if (!nl_tcp) return -ENOBUFS; if (nla_put_in_addr(msg, NL80211_WOWLAN_TCP_SRC_IPV4, tcp->src) || nla_put_in_addr(msg, NL80211_WOWLAN_TCP_DST_IPV4, tcp->dst) || nla_put(msg, NL80211_WOWLAN_TCP_DST_MAC, ETH_ALEN, tcp->dst_mac) || nla_put_u16(msg, NL80211_WOWLAN_TCP_SRC_PORT, tcp->src_port) || nla_put_u16(msg, NL80211_WOWLAN_TCP_DST_PORT, tcp->dst_port) || nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD, tcp->payload_len, tcp->payload) || nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL, tcp->data_interval) || nla_put(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD, tcp->wake_len, tcp->wake_data) || nla_put(msg, NL80211_WOWLAN_TCP_WAKE_MASK, DIV_ROUND_UP(tcp->wake_len, 8), tcp->wake_mask)) return -ENOBUFS; if (tcp->payload_seq.len && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ, sizeof(tcp->payload_seq), &tcp->payload_seq)) return -ENOBUFS; if (tcp->payload_tok.len && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN, sizeof(tcp->payload_tok) + tcp->tokens_size, &tcp->payload_tok)) return -ENOBUFS; nla_nest_end(msg, nl_tcp); return 0; } static int nl80211_send_wowlan_nd(struct sk_buff *msg, struct cfg80211_sched_scan_request *req) { struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan; int i; if (!req) return 0; nd = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_NET_DETECT); if (!nd) return -ENOBUFS; if (req->n_scan_plans == 1 && nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->scan_plans[0].interval * 1000)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay)) return -ENOBUFS; if (req->relative_rssi_set) { struct nl80211_bss_select_rssi_adjust rssi_adjust; if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI, req->relative_rssi)) return -ENOBUFS; rssi_adjust.band = req->rssi_adjust.band; rssi_adjust.delta = req->rssi_adjust.delta; if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, sizeof(rssi_adjust), &rssi_adjust)) return -ENOBUFS; } freqs = nla_nest_start_noflag(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!freqs) return -ENOBUFS; for (i = 0; i < req->n_channels; i++) { if (nla_put_u32(msg, i, 
req->channels[i]->center_freq)) return -ENOBUFS; } nla_nest_end(msg, freqs); if (req->n_match_sets) { matches = nla_nest_start_noflag(msg, NL80211_ATTR_SCHED_SCAN_MATCH); if (!matches) return -ENOBUFS; for (i = 0; i < req->n_match_sets; i++) { match = nla_nest_start_noflag(msg, i); if (!match) return -ENOBUFS; if (nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID, req->match_sets[i].ssid.ssid_len, req->match_sets[i].ssid.ssid)) return -ENOBUFS; nla_nest_end(msg, match); } nla_nest_end(msg, matches); } scan_plans = nla_nest_start_noflag(msg, NL80211_ATTR_SCHED_SCAN_PLANS); if (!scan_plans) return -ENOBUFS; for (i = 0; i < req->n_scan_plans; i++) { scan_plan = nla_nest_start_noflag(msg, i + 1); if (!scan_plan) return -ENOBUFS; if (nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, req->scan_plans[i].interval) || (req->scan_plans[i].iterations && nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS, req->scan_plans[i].iterations))) return -ENOBUFS; nla_nest_end(msg, scan_plan); } nla_nest_end(msg, scan_plans); nla_nest_end(msg, nd); return 0; } static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct sk_buff *msg; void *hdr; u32 size = NLMSG_DEFAULT_SIZE; if (!rdev->wiphy.wowlan) return -EOPNOTSUPP; if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { /* adjust size to have room for all the data */ size += rdev->wiphy.wowlan_config->tcp->tokens_size + rdev->wiphy.wowlan_config->tcp->payload_len + rdev->wiphy.wowlan_config->tcp->wake_len + rdev->wiphy.wowlan_config->tcp->wake_len / 8; } msg = nlmsg_new(size, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_WOWLAN); if (!hdr) goto nla_put_failure; if (rdev->wiphy.wowlan_config) { struct nlattr *nl_wowlan; nl_wowlan = nla_nest_start_noflag(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!nl_wowlan) goto nla_put_failure; if ((rdev->wiphy.wowlan_config->any && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || (rdev->wiphy.wowlan_config->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || (rdev->wiphy.wowlan_config->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || (rdev->wiphy.wowlan_config->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || (rdev->wiphy.wowlan_config->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || (rdev->wiphy.wowlan_config->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || (rdev->wiphy.wowlan_config->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) goto nla_put_failure; if (nl80211_send_wowlan_patterns(msg, rdev)) goto nla_put_failure; if (nl80211_send_wowlan_tcp(msg, rdev->wiphy.wowlan_config->tcp)) goto nla_put_failure; if (nl80211_send_wowlan_nd( msg, rdev->wiphy.wowlan_config->nd_config)) goto nla_put_failure; nla_nest_end(msg, nl_wowlan); } genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, struct nlattr *attr, struct cfg80211_wowlan *trig) { struct nlattr *tb[NUM_NL80211_WOWLAN_TCP]; struct cfg80211_wowlan_tcp *cfg; struct nl80211_wowlan_tcp_data_token *tok = NULL; struct nl80211_wowlan_tcp_data_seq *seq = NULL; u32 size; u32 data_size, wake_size, tokens_size = 0, wake_mask_size; int err, port; if (!rdev->wiphy.wowlan->tcp) return -EINVAL; err = nla_parse_nested_deprecated(tb, 
MAX_NL80211_WOWLAN_TCP, attr, nl80211_wowlan_tcp_policy, NULL); if (err) return err; if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] || !tb[NL80211_WOWLAN_TCP_DST_IPV4] || !tb[NL80211_WOWLAN_TCP_DST_MAC] || !tb[NL80211_WOWLAN_TCP_DST_PORT] || !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] || !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] || !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] || !tb[NL80211_WOWLAN_TCP_WAKE_MASK]) return -EINVAL; data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max) return -EINVAL; if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > rdev->wiphy.wowlan->tcp->data_interval_max || nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) return -EINVAL; wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max) return -EINVAL; wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); if (wake_mask_size != DIV_ROUND_UP(wake_size, 8)) return -EINVAL; if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) { u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); tokens_size = tokln - sizeof(*tok); if (!tok->len || tokens_size % tok->len) return -EINVAL; if (!rdev->wiphy.wowlan->tcp->tok) return -EINVAL; if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len) return -EINVAL; if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len) return -EINVAL; if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize) return -EINVAL; if (tok->offset + tok->len > data_size) return -EINVAL; } if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); if (!rdev->wiphy.wowlan->tcp->seq) return -EINVAL; if (seq->len == 0 || seq->len > 4) return -EINVAL; if (seq->len + seq->offset > data_size) return -EINVAL; } size = sizeof(*cfg); size += data_size; size += wake_size + wake_mask_size; size += tokens_size; cfg = kzalloc(size, GFP_KERNEL); if (!cfg) return -ENOMEM; cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]); cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]); memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]), ETH_ALEN); if (tb[NL80211_WOWLAN_TCP_SRC_PORT]) port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]); else port = 0; #ifdef CONFIG_INET /* allocate a socket and port for it and use it */ err = __sock_create(wiphy_net(&rdev->wiphy), PF_INET, SOCK_STREAM, IPPROTO_TCP, &cfg->sock, 1); if (err) { kfree(cfg); return err; } if (inet_csk_get_port(cfg->sock->sk, port)) { sock_release(cfg->sock); kfree(cfg); return -EADDRINUSE; } cfg->src_port = inet_sk(cfg->sock->sk)->inet_num; #else if (!port) { kfree(cfg); return -EINVAL; } cfg->src_port = port; #endif cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]); cfg->payload_len = data_size; cfg->payload = (u8 *)cfg + sizeof(*cfg) + tokens_size; memcpy((void *)cfg->payload, nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]), data_size); if (seq) cfg->payload_seq = *seq; cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]); cfg->wake_len = wake_size; cfg->wake_data = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size; memcpy((void *)cfg->wake_data, nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]), wake_size); cfg->wake_mask = (u8 *)cfg + sizeof(*cfg) + tokens_size + data_size + wake_size; memcpy((void *)cfg->wake_mask, nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]), wake_mask_size); if (tok) { cfg->tokens_size = tokens_size; cfg->payload_tok = *tok; memcpy(cfg->payload_tok.token_stream, tok->token_stream, tokens_size); } 
trig->tcp = cfg; return 0; } static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev, const struct wiphy_wowlan_support *wowlan, struct nlattr *attr, struct cfg80211_wowlan *trig) { struct nlattr **tb; int err; tb = kcalloc(NUM_NL80211_ATTR, sizeof(*tb), GFP_KERNEL); if (!tb) return -ENOMEM; if (!(wowlan->flags & WIPHY_WOWLAN_NET_DETECT)) { err = -EOPNOTSUPP; goto out; } err = nla_parse_nested_deprecated(tb, NL80211_ATTR_MAX, attr, nl80211_policy, NULL); if (err) goto out; trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb, wowlan->max_nd_match_sets); err = PTR_ERR_OR_ZERO(trig->nd_config); if (err) trig->nd_config = NULL; out: kfree(tb); return err; } static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; struct cfg80211_wowlan new_triggers = {}; struct cfg80211_wowlan *ntrig; const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan; int err, i; bool prev_enabled = rdev->wiphy.wowlan_config; bool regular = false; if (!wowlan) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { cfg80211_rdev_free_wowlan(rdev); rdev->wiphy.wowlan_config = NULL; goto set_wakeup; } err = nla_parse_nested_deprecated(tb, MAX_NL80211_WOWLAN_TRIG, info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS], nl80211_wowlan_policy, info->extack); if (err) return err; if (tb[NL80211_WOWLAN_TRIG_ANY]) { if (!(wowlan->flags & WIPHY_WOWLAN_ANY)) return -EINVAL; new_triggers.any = true; } if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) { if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT)) return -EINVAL; new_triggers.disconnect = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) { if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT)) return -EINVAL; new_triggers.magic_pkt = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED]) return -EINVAL; if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE]) { if (!(wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)) return -EINVAL; new_triggers.gtk_rekey_failure = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) { if (!(wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)) return -EINVAL; new_triggers.eap_identity_req = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) { if (!(wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)) return -EINVAL; new_triggers.four_way_handshake = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) { if (!(wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE)) return -EINVAL; new_triggers.rfkill_release = true; regular = true; } if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) { struct nlattr *pat; int n_patterns = 0; int rem, pat_len, mask_len, pkt_offset; struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; regular = true; nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) n_patterns++; if (n_patterns > wowlan->n_patterns) return -EINVAL; new_triggers.patterns = kcalloc(n_patterns, sizeof(new_triggers.patterns[0]), GFP_KERNEL); if (!new_triggers.patterns) return -ENOMEM; new_triggers.n_patterns = n_patterns; i = 0; nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], rem) { u8 *mask_pat; err = nla_parse_nested_deprecated(pat_tb, MAX_NL80211_PKTPAT, pat, nl80211_packet_pattern_policy, info->extack); if (err) goto error; err = -EINVAL; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) goto error; pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != 
mask_len) goto error; if (pat_len > wowlan->pattern_max_len || pat_len < wowlan->pattern_min_len) goto error; if (!pat_tb[NL80211_PKTPAT_OFFSET]) pkt_offset = 0; else pkt_offset = nla_get_u32( pat_tb[NL80211_PKTPAT_OFFSET]); if (pkt_offset > wowlan->max_pkt_offset) goto error; new_triggers.patterns[i].pkt_offset = pkt_offset; mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL); if (!mask_pat) { err = -ENOMEM; goto error; } new_triggers.patterns[i].mask = mask_pat; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); mask_pat += mask_len; new_triggers.patterns[i].pattern = mask_pat; new_triggers.patterns[i].pattern_len = pat_len; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); i++; } } if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) { regular = true; err = nl80211_parse_wowlan_tcp( rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION], &new_triggers); if (err) goto error; } if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) { regular = true; err = nl80211_parse_wowlan_nd( rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT], &new_triggers); if (err) goto error; } /* The 'any' trigger means the device continues operating more or less * as in its normal operation mode and wakes up the host on most of the * normal interrupts (like packet RX, ...) * It therefore makes little sense to combine with the more constrained * wakeup trigger modes. */ if (new_triggers.any && regular) { err = -EINVAL; goto error; } ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL); if (!ntrig) { err = -ENOMEM; goto error; } cfg80211_rdev_free_wowlan(rdev); rdev->wiphy.wowlan_config = ntrig; set_wakeup: if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wiphy.wowlan_config) rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config); return 0; error: for (i = 0; i < new_triggers.n_patterns; i++) kfree(new_triggers.patterns[i].mask); kfree(new_triggers.patterns); if (new_triggers.tcp && new_triggers.tcp->sock) sock_release(new_triggers.tcp->sock); kfree(new_triggers.tcp); kfree(new_triggers.nd_config); return err; } #endif static int nl80211_send_coalesce_rules(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules; int i, j, pat_len; struct cfg80211_coalesce_rules *rule; if (!rdev->coalesce->n_rules) return 0; nl_rules = nla_nest_start_noflag(msg, NL80211_ATTR_COALESCE_RULE); if (!nl_rules) return -ENOBUFS; for (i = 0; i < rdev->coalesce->n_rules; i++) { nl_rule = nla_nest_start_noflag(msg, i + 1); if (!nl_rule) return -ENOBUFS; rule = &rdev->coalesce->rules[i]; if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, rule->delay)) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION, rule->condition)) return -ENOBUFS; nl_pats = nla_nest_start_noflag(msg, NL80211_ATTR_COALESCE_RULE_PKT_PATTERN); if (!nl_pats) return -ENOBUFS; for (j = 0; j < rule->n_patterns; j++) { nl_pat = nla_nest_start_noflag(msg, j + 1); if (!nl_pat) return -ENOBUFS; pat_len = rule->patterns[j].pattern_len; if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), rule->patterns[j].mask) || nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len, rule->patterns[j].pattern) || nla_put_u32(msg, NL80211_PKTPAT_OFFSET, rule->patterns[j].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } nla_nest_end(msg, nl_pats); nla_nest_end(msg, nl_rule); } nla_nest_end(msg, nl_rules); return 0; } static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct sk_buff *msg; void *hdr; if 
(!rdev->wiphy.coalesce) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_COALESCE); if (!hdr) goto nla_put_failure; if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev) { struct cfg80211_coalesce *coalesce = rdev->coalesce; int i, j; struct cfg80211_coalesce_rules *rule; if (!coalesce) return; for (i = 0; i < coalesce->n_rules; i++) { rule = &coalesce->rules[i]; for (j = 0; j < rule->n_patterns; j++) kfree(rule->patterns[j].mask); kfree(rule->patterns); } kfree(coalesce->rules); kfree(coalesce); rdev->coalesce = NULL; } static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, struct nlattr *rule, struct cfg80211_coalesce_rules *new_rule) { int err, i; const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat; int rem, pat_len, mask_len, pkt_offset, n_patterns = 0; struct nlattr *pat_tb[NUM_NL80211_PKTPAT]; err = nla_parse_nested_deprecated(tb, NL80211_ATTR_COALESCE_RULE_MAX, rule, nl80211_coalesce_policy, NULL); if (err) return err; if (tb[NL80211_ATTR_COALESCE_RULE_DELAY]) new_rule->delay = nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]); if (new_rule->delay > coalesce->max_delay) return -EINVAL; if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION]) new_rule->condition = nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]); if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN]) return -EINVAL; nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], rem) n_patterns++; if (n_patterns > coalesce->n_patterns) return -EINVAL; new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]), GFP_KERNEL); if (!new_rule->patterns) return -ENOMEM; new_rule->n_patterns = n_patterns; i = 0; nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], rem) { u8 *mask_pat; err = nla_parse_nested_deprecated(pat_tb, MAX_NL80211_PKTPAT, pat, nl80211_packet_pattern_policy, NULL); if (err) return err; if (!pat_tb[NL80211_PKTPAT_MASK] || !pat_tb[NL80211_PKTPAT_PATTERN]) return -EINVAL; pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len) return -EINVAL; if (pat_len > coalesce->pattern_max_len || pat_len < coalesce->pattern_min_len) return -EINVAL; if (!pat_tb[NL80211_PKTPAT_OFFSET]) pkt_offset = 0; else pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]); if (pkt_offset > coalesce->max_pkt_offset) return -EINVAL; new_rule->patterns[i].pkt_offset = pkt_offset; mask_pat = kmalloc(mask_len + pat_len, GFP_KERNEL); if (!mask_pat) return -ENOMEM; new_rule->patterns[i].mask = mask_pat; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len); mask_pat += mask_len; new_rule->patterns[i].pattern = mask_pat; new_rule->patterns[i].pattern_len = pat_len; memcpy(mask_pat, nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len); i++; } return 0; } static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; struct cfg80211_coalesce new_coalesce = {}; struct cfg80211_coalesce *n_coalesce; int err, rem_rule, n_rules = 0, i, j; struct nlattr 
*rule; struct cfg80211_coalesce_rules *tmp_rule; if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) { cfg80211_rdev_free_coalesce(rdev); rdev_set_coalesce(rdev, NULL); return 0; } nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], rem_rule) n_rules++; if (n_rules > coalesce->n_rules) return -EINVAL; new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]), GFP_KERNEL); if (!new_coalesce.rules) return -ENOMEM; new_coalesce.n_rules = n_rules; i = 0; nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], rem_rule) { err = nl80211_parse_coalesce_rule(rdev, rule, &new_coalesce.rules[i]); if (err) goto error; i++; } err = rdev_set_coalesce(rdev, &new_coalesce); if (err) goto error; n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL); if (!n_coalesce) { err = -ENOMEM; goto error; } cfg80211_rdev_free_coalesce(rdev); rdev->coalesce = n_coalesce; return 0; error: for (i = 0; i < new_coalesce.n_rules; i++) { tmp_rule = &new_coalesce.rules[i]; for (j = 0; j < tmp_rule->n_patterns; j++) kfree(tmp_rule->patterns[j].mask); kfree(tmp_rule->patterns); } kfree(new_coalesce.rules); return err; } static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; struct cfg80211_gtk_rekey_data rekey_data = {}; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) return -EINVAL; err = nla_parse_nested_deprecated(tb, MAX_NL80211_REKEY_DATA, info->attrs[NL80211_ATTR_REKEY_DATA], nl80211_rekey_policy, info->extack); if (err) return err; if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_KCK]) return -EINVAL; if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK && nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KEK_EXT_LEN)) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK && nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN) && !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KCK_32 && nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN_32)) return -ERANGE; rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]); rekey_data.kck_len = nla_len(tb[NL80211_REKEY_DATA_KCK]); if (tb[NL80211_REKEY_DATA_AKM]) rekey_data.akm = nla_get_u32(tb[NL80211_REKEY_DATA_AKM]); wdev_lock(wdev); if (!wdev->connected) { err = -ENOTCONN; goto out; } if (!rdev->ops->set_rekey_data) { err = -EOPNOTSUPP; goto out; } err = rdev_set_rekey_data(rdev, dev, &rekey_data); out: wdev_unlock(wdev); return err; } static int nl80211_register_unexpected_frame(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; if (wdev->ap_unexpected_nlportid) return -EBUSY; wdev->ap_unexpected_nlportid = info->snd_portid; return 0; } static int nl80211_probe_client(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct 
net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct sk_buff *msg; void *hdr; const u8 *addr; u64 cookie; int err; if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!rdev->ops->probe_client) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_PROBE_CLIENT); if (!hdr) { err = -ENOBUFS; goto free_msg; } addr = nla_data(info->attrs[NL80211_ATTR_MAC]); err = rdev_probe_client(rdev, dev, addr, &cookie); if (err) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_beacon_registration *reg, *nreg; int rv; if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) return -EOPNOTSUPP; nreg = kzalloc(sizeof(*nreg), GFP_KERNEL); if (!nreg) return -ENOMEM; /* First, check if already registered. */ spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry(reg, &rdev->beacon_registrations, list) { if (reg->nlportid == info->snd_portid) { rv = -EALREADY; goto out_err; } } /* Add it to the list */ nreg->nlportid = info->snd_portid; list_add(&nreg->list, &rdev->beacon_registrations); spin_unlock_bh(&rdev->beacon_registrations_lock); return 0; out_err: spin_unlock_bh(&rdev->beacon_registrations_lock); kfree(nreg); return rv; } static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; int err; if (!rdev->ops->start_p2p_device) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) return -EOPNOTSUPP; if (wdev_running(wdev)) return 0; if (rfkill_blocked(rdev->wiphy.rfkill)) return -ERFKILL; err = rdev_start_p2p_device(rdev, wdev); if (err) return err; wdev->is_running = true; rdev->opencount++; return 0; } static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) return -EOPNOTSUPP; if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP; cfg80211_stop_p2p_device(rdev, wdev); return 0; } static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_nan_conf conf = {}; int err; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (wdev_running(wdev)) return -EEXIST; if (rfkill_blocked(rdev->wiphy.rfkill)) return -ERFKILL; if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) return -EINVAL; conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); if (info->attrs[NL80211_ATTR_BANDS]) { u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); if (bands & ~(u32)wdev->wiphy->nan_supported_bands) return -EOPNOTSUPP; if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) return -EINVAL; conf.bands = bands; } err = rdev_start_nan(rdev, wdev, &conf); if (err) return err; wdev->is_running = true; rdev->opencount++; return 0; } static int 
nl80211_stop_nan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; cfg80211_stop_nan(rdev, wdev); return 0; } static int validate_nan_filter(struct nlattr *filter_attr) { struct nlattr *attr; int len = 0, n_entries = 0, rem; nla_for_each_nested(attr, filter_attr, rem) { len += nla_len(attr); n_entries++; } if (len >= U8_MAX) return -EINVAL; return n_entries; } static int handle_nan_filter(struct nlattr *attr_filter, struct cfg80211_nan_func *func, bool tx) { struct nlattr *attr; int n_entries, rem, i; struct cfg80211_nan_func_filter *filter; n_entries = validate_nan_filter(attr_filter); if (n_entries < 0) return n_entries; BUILD_BUG_ON(sizeof(*func->rx_filters) != sizeof(*func->tx_filters)); filter = kcalloc(n_entries, sizeof(*func->rx_filters), GFP_KERNEL); if (!filter) return -ENOMEM; i = 0; nla_for_each_nested(attr, attr_filter, rem) { filter[i].filter = nla_memdup(attr, GFP_KERNEL); if (!filter[i].filter) goto err; filter[i].len = nla_len(attr); i++; } if (tx) { func->num_tx_filters = n_entries; func->tx_filters = filter; } else { func->num_rx_filters = n_entries; func->rx_filters = filter; } return 0; err: i = 0; nla_for_each_nested(attr, attr_filter, rem) { kfree(filter[i].filter); i++; } kfree(filter); return -ENOMEM; } static int nl80211_nan_add_func(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct nlattr *tb[NUM_NL80211_NAN_FUNC_ATTR], *func_attr; struct cfg80211_nan_func *func; struct sk_buff *msg = NULL; void *hdr = NULL; int err = 0; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) return -ENOTCONN; if (!info->attrs[NL80211_ATTR_NAN_FUNC]) return -EINVAL; err = nla_parse_nested_deprecated(tb, NL80211_NAN_FUNC_ATTR_MAX, info->attrs[NL80211_ATTR_NAN_FUNC], nl80211_nan_func_policy, info->extack); if (err) return err; func = kzalloc(sizeof(*func), GFP_KERNEL); if (!func) return -ENOMEM; func->cookie = cfg80211_assign_cookie(rdev); if (!tb[NL80211_NAN_FUNC_TYPE]) { err = -EINVAL; goto out; } func->type = nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]); if (!tb[NL80211_NAN_FUNC_SERVICE_ID]) { err = -EINVAL; goto out; } memcpy(func->service_id, nla_data(tb[NL80211_NAN_FUNC_SERVICE_ID]), sizeof(func->service_id)); func->close_range = nla_get_flag(tb[NL80211_NAN_FUNC_CLOSE_RANGE]); if (tb[NL80211_NAN_FUNC_SERVICE_INFO]) { func->serv_spec_info_len = nla_len(tb[NL80211_NAN_FUNC_SERVICE_INFO]); func->serv_spec_info = kmemdup(nla_data(tb[NL80211_NAN_FUNC_SERVICE_INFO]), func->serv_spec_info_len, GFP_KERNEL); if (!func->serv_spec_info) { err = -ENOMEM; goto out; } } if (tb[NL80211_NAN_FUNC_TTL]) func->ttl = nla_get_u32(tb[NL80211_NAN_FUNC_TTL]); switch (func->type) { case NL80211_NAN_FUNC_PUBLISH: if (!tb[NL80211_NAN_FUNC_PUBLISH_TYPE]) { err = -EINVAL; goto out; } func->publish_type = nla_get_u8(tb[NL80211_NAN_FUNC_PUBLISH_TYPE]); func->publish_bcast = nla_get_flag(tb[NL80211_NAN_FUNC_PUBLISH_BCAST]); if ((!(func->publish_type & NL80211_NAN_SOLICITED_PUBLISH)) && func->publish_bcast) { err = -EINVAL; goto out; } break; case NL80211_NAN_FUNC_SUBSCRIBE: func->subscribe_active = nla_get_flag(tb[NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE]); break; case NL80211_NAN_FUNC_FOLLOW_UP: if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] || !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) { err 
= -EINVAL; goto out; } func->followup_id = nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_ID]); func->followup_reqid = nla_get_u8(tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]); memcpy(func->followup_dest.addr, nla_data(tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]), sizeof(func->followup_dest.addr)); if (func->ttl) { err = -EINVAL; goto out; } break; default: err = -EINVAL; goto out; } if (tb[NL80211_NAN_FUNC_SRF]) { struct nlattr *srf_tb[NUM_NL80211_NAN_SRF_ATTR]; err = nla_parse_nested_deprecated(srf_tb, NL80211_NAN_SRF_ATTR_MAX, tb[NL80211_NAN_FUNC_SRF], nl80211_nan_srf_policy, info->extack); if (err) goto out; func->srf_include = nla_get_flag(srf_tb[NL80211_NAN_SRF_INCLUDE]); if (srf_tb[NL80211_NAN_SRF_BF]) { if (srf_tb[NL80211_NAN_SRF_MAC_ADDRS] || !srf_tb[NL80211_NAN_SRF_BF_IDX]) { err = -EINVAL; goto out; } func->srf_bf_len = nla_len(srf_tb[NL80211_NAN_SRF_BF]); func->srf_bf = kmemdup(nla_data(srf_tb[NL80211_NAN_SRF_BF]), func->srf_bf_len, GFP_KERNEL); if (!func->srf_bf) { err = -ENOMEM; goto out; } func->srf_bf_idx = nla_get_u8(srf_tb[NL80211_NAN_SRF_BF_IDX]); } else { struct nlattr *attr, *mac_attr = srf_tb[NL80211_NAN_SRF_MAC_ADDRS]; int n_entries, rem, i = 0; if (!mac_attr) { err = -EINVAL; goto out; } n_entries = validate_acl_mac_addrs(mac_attr); if (n_entries <= 0) { err = -EINVAL; goto out; } func->srf_num_macs = n_entries; func->srf_macs = kcalloc(n_entries, sizeof(*func->srf_macs), GFP_KERNEL); if (!func->srf_macs) { err = -ENOMEM; goto out; } nla_for_each_nested(attr, mac_attr, rem) memcpy(func->srf_macs[i++].addr, nla_data(attr), sizeof(*func->srf_macs)); } } if (tb[NL80211_NAN_FUNC_TX_MATCH_FILTER]) { err = handle_nan_filter(tb[NL80211_NAN_FUNC_TX_MATCH_FILTER], func, true); if (err) goto out; } if (tb[NL80211_NAN_FUNC_RX_MATCH_FILTER]) { err = handle_nan_filter(tb[NL80211_NAN_FUNC_RX_MATCH_FILTER], func, false); if (err) goto out; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_ADD_NAN_FUNCTION); /* This can't really happen - we just allocated 4KB */ if (WARN_ON(!hdr)) { err = -ENOMEM; goto out; } err = rdev_add_nan_func(rdev, wdev, func); out: if (err < 0) { cfg80211_free_nan_func(func); nlmsg_free(msg); return err; } /* propagate the instance id and cookie to userspace */ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, func->cookie, NL80211_ATTR_PAD)) goto nla_put_failure; func_attr = nla_nest_start_noflag(msg, NL80211_ATTR_NAN_FUNC); if (!func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, func->instance_id)) goto nla_put_failure; nla_nest_end(msg, func_attr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int nl80211_nan_del_func(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; u64 cookie; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) return -ENOTCONN; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]); rdev_del_nan_func(rdev, wdev, cookie); return 0; } static int nl80211_nan_change_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_nan_conf conf = {}; u32 changed = 0; if (wdev->iftype != NL80211_IFTYPE_NAN) return -EOPNOTSUPP; if (!wdev_running(wdev)) 
return -ENOTCONN; if (info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) { conf.master_pref = nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]); if (conf.master_pref <= 1 || conf.master_pref == 255) return -EINVAL; changed |= CFG80211_NAN_CONF_CHANGED_PREF; } if (info->attrs[NL80211_ATTR_BANDS]) { u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]); if (bands & ~(u32)wdev->wiphy->nan_supported_bands) return -EOPNOTSUPP; if (bands && !(bands & BIT(NL80211_BAND_2GHZ))) return -EINVAL; conf.bands = bands; changed |= CFG80211_NAN_CONF_CHANGED_BANDS; } if (!changed) return -EINVAL; return rdev_nan_change_conf(rdev, wdev, &conf, changed); } void cfg80211_nan_match(struct wireless_dev *wdev, struct cfg80211_nan_match_params *match, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct nlattr *match_attr, *local_func_attr, *peer_func_attr; struct sk_buff *msg; void *hdr; if (WARN_ON(!match->inst_id || !match->peer_inst_id || !match->addr)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_MATCH); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, match->cookie, NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, match->addr)) goto nla_put_failure; match_attr = nla_nest_start_noflag(msg, NL80211_ATTR_NAN_MATCH); if (!match_attr) goto nla_put_failure; local_func_attr = nla_nest_start_noflag(msg, NL80211_NAN_MATCH_FUNC_LOCAL); if (!local_func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->inst_id)) goto nla_put_failure; nla_nest_end(msg, local_func_attr); peer_func_attr = nla_nest_start_noflag(msg, NL80211_NAN_MATCH_FUNC_PEER); if (!peer_func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_TYPE, match->type) || nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, match->peer_inst_id)) goto nla_put_failure; if (match->info && match->info_len && nla_put(msg, NL80211_NAN_FUNC_SERVICE_INFO, match->info_len, match->info)) goto nla_put_failure; nla_nest_end(msg, peer_func_attr); nla_nest_end(msg, match_attr); genlmsg_end(msg, hdr); if (!wdev->owner_nlportid) genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_NAN, gfp); else genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->owner_nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_nan_match); void cfg80211_nan_func_terminated(struct wireless_dev *wdev, u8 inst_id, enum nl80211_nan_func_term_reason reason, u64 cookie, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; struct nlattr *func_attr; void *hdr; if (WARN_ON(!inst_id)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_NAN_FUNCTION); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; func_attr = 
nla_nest_start_noflag(msg, NL80211_ATTR_NAN_FUNC); if (!func_attr) goto nla_put_failure; if (nla_put_u8(msg, NL80211_NAN_FUNC_INSTANCE_ID, inst_id) || nla_put_u8(msg, NL80211_NAN_FUNC_TERM_REASON, reason)) goto nla_put_failure; nla_nest_end(msg, func_attr); genlmsg_end(msg, hdr); if (!wdev->owner_nlportid) genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_NAN, gfp); else genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->owner_nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_nan_func_terminated); static int nl80211_get_protocol_features(struct sk_buff *skb, struct genl_info *info) { void *hdr; struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_PROTOCOL_FEATURES); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_PROTOCOL_FEATURES, NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: kfree_skb(msg); return -ENOBUFS; } static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_update_ft_ies_params ft_params; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->update_ft_ies) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || !info->attrs[NL80211_ATTR_IE]) return -EINVAL; memset(&ft_params, 0, sizeof(ft_params)); ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]); ft_params.ie = nla_data(info->attrs[NL80211_ATTR_IE]); ft_params.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); return rdev_update_ft_ies(rdev, dev, &ft_params); } static int nl80211_crit_protocol_start(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; enum nl80211_crit_proto_id proto = NL80211_CRIT_PROTO_UNSPEC; u16 duration; int ret; if (!rdev->ops->crit_proto_start) return -EOPNOTSUPP; if (WARN_ON(!rdev->ops->crit_proto_stop)) return -EINVAL; if (rdev->crit_proto_nlportid) return -EBUSY; /* determine protocol if provided */ if (info->attrs[NL80211_ATTR_CRIT_PROT_ID]) proto = nla_get_u16(info->attrs[NL80211_ATTR_CRIT_PROT_ID]); if (proto >= NUM_NL80211_CRIT_PROTO) return -EINVAL; /* timeout must be provided */ if (!info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]) return -EINVAL; duration = nla_get_u16(info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]); ret = rdev_crit_proto_start(rdev, wdev, proto, duration); if (!ret) rdev->crit_proto_nlportid = info->snd_portid; return ret; } static int nl80211_crit_protocol_stop(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; if (!rdev->ops->crit_proto_stop) return -EOPNOTSUPP; if (rdev->crit_proto_nlportid) { rdev->crit_proto_nlportid = 0; rdev_crit_proto_stop(rdev, wdev); } return 0; } static int nl80211_vendor_check_policy(const struct wiphy_vendor_command *vcmd, struct nlattr *attr, struct netlink_ext_ack *extack) { if (vcmd->policy == VENDOR_CMD_RAW_DATA) { if (attr->nla_type & NLA_F_NESTED) { NL_SET_ERR_MSG_ATTR(extack, attr, "unexpected nested data"); return -EINVAL; } return 0; } if (!(attr->nla_type & NLA_F_NESTED)) { NL_SET_ERR_MSG_ATTR(extack, attr, "expected nested data"); return -EINVAL; } return nla_validate_nested(attr, vcmd->maxattr, vcmd->policy, extack); } static 
int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = __cfg80211_wdev_from_attrs(rdev, genl_info_net(info), info->attrs); int i, err; u32 vid, subcmd; if (!rdev->wiphy.vendor_commands) return -EOPNOTSUPP; if (IS_ERR(wdev)) { err = PTR_ERR(wdev); if (err != -EINVAL) return err; wdev = NULL; } else if (wdev->wiphy != &rdev->wiphy) { return -EINVAL; } if (!info->attrs[NL80211_ATTR_VENDOR_ID] || !info->attrs[NL80211_ATTR_VENDOR_SUBCMD]) return -EINVAL; vid = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(info->attrs[NL80211_ATTR_VENDOR_SUBCMD]); for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) { const struct wiphy_vendor_command *vcmd; void *data = NULL; int len = 0; vcmd = &rdev->wiphy.vendor_commands[i]; if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV)) { if (!wdev) return -EINVAL; if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && !wdev->netdev) return -EINVAL; if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { if (!wdev_running(wdev)) return -ENETDOWN; } } else { wdev = NULL; } if (!vcmd->doit) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); err = nl80211_vendor_check_policy(vcmd, info->attrs[NL80211_ATTR_VENDOR_DATA], info->extack); if (err) return err; } rdev->cur_cmd_info = info; err = vcmd->doit(&rdev->wiphy, wdev, data, len); rdev->cur_cmd_info = NULL; return err; } return -EOPNOTSUPP; } static int nl80211_prepare_vendor_dump(struct sk_buff *skb, struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct wireless_dev **wdev) { struct nlattr **attrbuf; u32 vid, subcmd; unsigned int i; int vcmd_idx = -1; int err; void *data = NULL; unsigned int data_len = 0; if (cb->args[0]) { /* subtract the 1 again here */ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); struct wireless_dev *tmp; if (!wiphy) return -ENODEV; *rdev = wiphy_to_rdev(wiphy); *wdev = NULL; if (cb->args[1]) { list_for_each_entry(tmp, &wiphy->wdev_list, list) { if (tmp->identifier == cb->args[1] - 1) { *wdev = tmp; break; } } } /* keep rtnl locked in successful case */ return 0; } attrbuf = kcalloc(NUM_NL80211_ATTR, sizeof(*attrbuf), GFP_KERNEL); if (!attrbuf) return -ENOMEM; err = nlmsg_parse_deprecated(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, attrbuf, nl80211_fam.maxattr, nl80211_policy, NULL); if (err) goto out; if (!attrbuf[NL80211_ATTR_VENDOR_ID] || !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { err = -EINVAL; goto out; } *wdev = __cfg80211_wdev_from_attrs(NULL, sock_net(skb->sk), attrbuf); if (IS_ERR(*wdev)) *wdev = NULL; *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); if (IS_ERR(*rdev)) { err = PTR_ERR(*rdev); goto out; } vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); for (i = 0; i < (*rdev)->wiphy.n_vendor_commands; i++) { const struct wiphy_vendor_command *vcmd; vcmd = &(*rdev)->wiphy.vendor_commands[i]; if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) continue; if (!vcmd->dumpit) { err = -EOPNOTSUPP; goto out; } vcmd_idx = i; break; } if (vcmd_idx < 0) { err = -EOPNOTSUPP; goto out; } if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); data_len = nla_len(attrbuf[NL80211_ATTR_VENDOR_DATA]); err = 
nl80211_vendor_check_policy( &(*rdev)->wiphy.vendor_commands[vcmd_idx], attrbuf[NL80211_ATTR_VENDOR_DATA], cb->extack); if (err) goto out; } /* 0 is the first index - add 1 to parse only once */ cb->args[0] = (*rdev)->wiphy_idx + 1; /* add 1 to know if it was NULL */ cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0; cb->args[2] = vcmd_idx; cb->args[3] = (unsigned long)data; cb->args[4] = data_len; /* keep rtnl locked in successful case */ err = 0; out: kfree(attrbuf); return err; } static int nl80211_vendor_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; unsigned int vcmd_idx; const struct wiphy_vendor_command *vcmd; void *data; int data_len; int err; struct nlattr *vendor_data; rtnl_lock(); err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); if (err) goto out; vcmd_idx = cb->args[2]; data = (void *)cb->args[3]; data_len = cb->args[4]; vcmd = &rdev->wiphy.vendor_commands[vcmd_idx]; if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV)) { if (!wdev) { err = -EINVAL; goto out; } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && !wdev->netdev) { err = -EINVAL; goto out; } if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { if (!wdev_running(wdev)) { err = -ENETDOWN; goto out; } } } while (1) { void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, NL80211_CMD_VENDOR); if (!hdr) break; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD))) { genlmsg_cancel(skb, hdr); break; } vendor_data = nla_nest_start_noflag(skb, NL80211_ATTR_VENDOR_DATA); if (!vendor_data) { genlmsg_cancel(skb, hdr); break; } err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len, (unsigned long *)&cb->args[5]); nla_nest_end(skb, vendor_data); if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; } else if (err <= 0) { genlmsg_cancel(skb, hdr); goto out; } genlmsg_end(skb, hdr); } err = skb->len; out: rtnl_unlock(); return err; } struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy, enum nl80211_commands cmd, enum nl80211_attrs attr, int approxlen) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (WARN_ON(!rdev->cur_cmd_info)) return NULL; return __cfg80211_alloc_vendor_skb(rdev, NULL, approxlen, rdev->cur_cmd_info->snd_portid, rdev->cur_cmd_info->snd_seq, cmd, attr, NULL, GFP_KERNEL); } EXPORT_SYMBOL(__cfg80211_alloc_reply_skb); int cfg80211_vendor_cmd_reply(struct sk_buff *skb) { struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlattr *data = ((void **)skb->cb)[2]; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); if (WARN_ON(!rdev->cur_cmd_info)) { kfree_skb(skb); return -EINVAL; } nla_nest_end(skb, data); genlmsg_end(skb, hdr); return genlmsg_reply(skb, rdev->cur_cmd_info); } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply); unsigned int cfg80211_vendor_cmd_get_sender(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); if (WARN_ON(!rdev->cur_cmd_info)) return 0; return rdev->cur_cmd_info->snd_portid; } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_get_sender); static int nl80211_set_qos_map(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_qos_map *qos_map = NULL; struct net_device *dev = info->user_ptr[1]; u8 *pos, len, num_des, 
des_len, des;
	int ret;

	if (!rdev->ops->set_qos_map)
		return -EOPNOTSUPP;

	if (info->attrs[NL80211_ATTR_QOS_MAP]) {
		pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]);
		len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);

		if (len % 2)
			return -EINVAL;

		qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL);
		if (!qos_map)
			return -ENOMEM;

		/* optional DSCP exceptions come first, followed by the
		 * 8 UP-to-DSCP ranges
		 */
		num_des = (len - IEEE80211_QOS_MAP_LEN_MIN) >> 1;
		if (num_des) {
			des_len = num_des *
				sizeof(struct cfg80211_dscp_exception);
			memcpy(qos_map->dscp_exception, pos, des_len);
			qos_map->num_des = num_des;
			for (des = 0; des < num_des; des++) {
				if (qos_map->dscp_exception[des].up > 7) {
					kfree(qos_map);
					return -EINVAL;
				}
			}
			pos += des_len;
		}
		memcpy(qos_map->up, pos, IEEE80211_QOS_MAP_LEN_MIN);
	}

	wdev_lock(dev->ieee80211_ptr);
	ret = nl80211_key_allowed(dev->ieee80211_ptr);
	if (!ret)
		ret = rdev_set_qos_map(rdev, dev, qos_map);
	wdev_unlock(dev->ieee80211_ptr);

	kfree(qos_map);
	return ret;
}

static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	const u8 *peer;
	u8 tsid, up;
	u16 admitted_time = 0;
	int err;

	if (!(rdev->wiphy.features & NL80211_FEATURE_SUPPORTS_WMM_ADMISSION))
		return -EOPNOTSUPP;

	if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] ||
	    !info->attrs[NL80211_ATTR_USER_PRIO])
		return -EINVAL;

	tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
	up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]);

	/* WMM uses TIDs 0-7 even for TSPEC */
	if (tsid >= IEEE80211_FIRST_TSPEC_TSID) {
		/* TODO: handle 802.11 TSPEC/admission control
		 * need more attributes for that (e.g. BA session requirement);
		 * change the WMM admission test above to allow both then
		 */
		return -EINVAL;
	}

	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (info->attrs[NL80211_ATTR_ADMITTED_TIME]) {
		admitted_time =
			nla_get_u16(info->attrs[NL80211_ATTR_ADMITTED_TIME]);
		if (!admitted_time)
			return -EINVAL;
	}

	wdev_lock(wdev);
	switch (wdev->iftype) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		if (wdev->connected)
			break;
		err = -ENOTCONN;
		goto out;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time);

 out:
	wdev_unlock(wdev);
	return err;
}

static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	const u8 *peer;
	u8 tsid;
	int err;

	if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);

	wdev_lock(wdev);
	err = rdev_del_tx_ts(rdev, dev, tsid, peer);
	wdev_unlock(wdev);

	return err;
}

static int nl80211_tdls_channel_switch(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_chan_def chandef = {};
	const u8 *addr;
	u8 oper_class;
	int err;

	if (!rdev->ops->tdls_channel_switch ||
	    !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
		return -EOPNOTSUPP;

	switch (dev->ieee80211_ptr->iftype) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!info->attrs[NL80211_ATTR_MAC] ||
	    !info->attrs[NL80211_ATTR_OPER_CLASS])
		return -EINVAL;

	err = nl80211_parse_chandef(rdev, info, &chandef);
	if (err)
		return err;

	/*
	 * Don't allow wide channels on the 2.4 GHz band, as per
	 * IEEE 802.11-2012 section 10.22.6.2.1. Disallow 5/10 MHz channels
	 * as well for now, since the specification doesn't define behaviour
	 * for them.
	 */
	if (chandef.chan->band == NL80211_BAND_2GHZ &&
	    chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
	    chandef.width != NL80211_CHAN_WIDTH_20)
		return -EINVAL;

	/* we will be active on the TDLS link */
	if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
					   wdev->iftype))
		return -EINVAL;

	/* don't allow switching to DFS channels */
	if (cfg80211_chandef_dfs_required(wdev->wiphy, &chandef, wdev->iftype))
		return -EINVAL;

	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
	oper_class = nla_get_u8(info->attrs[NL80211_ATTR_OPER_CLASS]);

	wdev_lock(wdev);
	err = rdev_tdls_channel_switch(rdev, dev, addr, oper_class, &chandef);
	wdev_unlock(wdev);

	return err;
}

static int nl80211_tdls_cancel_channel_switch(struct sk_buff *skb,
					      struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	const u8 *addr;

	if (!rdev->ops->tdls_channel_switch ||
	    !rdev->ops->tdls_cancel_channel_switch ||
	    !(rdev->wiphy.features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
		return -EOPNOTSUPP;

	switch (dev->ieee80211_ptr->iftype) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	wdev_lock(wdev);
	rdev_tdls_cancel_channel_switch(rdev, dev, addr);
	wdev_unlock(wdev);

	return 0;
}

static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
					    struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	const struct nlattr *nla;
	bool enabled;

	if (!rdev->ops->set_multicast_to_unicast)
		return -EOPNOTSUPP;

	if (wdev->iftype != NL80211_IFTYPE_AP &&
	    wdev->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	nla = info->attrs[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED];
	enabled = nla_get_flag(nla);

	return rdev_set_multicast_to_unicast(rdev, dev, enabled);
}

static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct cfg80211_pmk_conf pmk_conf = {};
	int ret;

	if (wdev->iftype != NL80211_IFTYPE_STATION &&
	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	if (!wiphy_ext_feature_isset(&rdev->wiphy,
				     NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X))
		return -EOPNOTSUPP;

	if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_PMK])
		return -EINVAL;

	wdev_lock(wdev);
	if (!wdev->connected) {
		ret = -ENOTCONN;
		goto out;
	}

	/* the PMK must be for the currently connected authenticator */
	pmk_conf.aa = nla_data(info->attrs[NL80211_ATTR_MAC]);
	if (memcmp(pmk_conf.aa, wdev->u.client.connected_addr, ETH_ALEN)) {
		ret = -EINVAL;
		goto out;
	}

	pmk_conf.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
	pmk_conf.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
	if (pmk_conf.pmk_len != WLAN_PMK_LEN &&
	    pmk_conf.pmk_len != WLAN_PMK_LEN_SUITE_B_192) {
		ret = -EINVAL;
		goto out;
	}

	if (info->attrs[NL80211_ATTR_PMKR0_NAME])
		pmk_conf.pmk_r0_name =
			nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]);

	ret = rdev_set_pmk(rdev, dev, &pmk_conf);
 out:
	wdev_unlock(wdev);
	return ret;
}

static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info)
{
	struct 
cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *aa; int ret; if (wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X)) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; wdev_lock(wdev); aa = nla_data(info->attrs[NL80211_ATTR_MAC]); ret = rdev_del_pmk(rdev, dev, aa); wdev_unlock(wdev); return ret; } static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_external_auth_params params; if (!rdev->ops->external_auth) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_SSID] && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EINVAL; if (!info->attrs[NL80211_ATTR_BSSID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STATUS_CODE]) return -EINVAL; memset(&params, 0, sizeof(params)); if (info->attrs[NL80211_ATTR_SSID]) { params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (params.ssid.ssid_len == 0) return -EINVAL; memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]), params.ssid.ssid_len); } memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN); params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); if (info->attrs[NL80211_ATTR_PMKID]) params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); return rdev_external_auth(rdev, dev, &params); } static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info) { bool dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *buf; size_t len; u8 *dest; u16 proto; bool noencrypt; u64 cookie = 0; int link_id; int err; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211)) return -EOPNOTSUPP; if (!rdev->ops->tx_control_port) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_FRAME] || !info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { GENL_SET_ERR_MSG(info, "Frame, MAC or ethertype missing"); return -EINVAL; } wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: break; case NL80211_IFTYPE_ADHOC: if (wdev->u.ibss.current_bss) break; err = -ENOTCONN; goto out; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (wdev->connected) break; err = -ENOTCONN; goto out; default: err = -EOPNOTSUPP; goto out; } wdev_unlock(wdev); buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); len = nla_len(info->attrs[NL80211_ATTR_FRAME]); dest = nla_data(info->attrs[NL80211_ATTR_MAC]); proto = nla_get_u16(info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); noencrypt = nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT]); link_id = nl80211_link_id_or_invalid(info->attrs); err = rdev_tx_control_port(rdev, dev, buf, len, dest, cpu_to_be16(proto), noencrypt, link_id, dont_wait_for_ack ? 
NULL : &cookie); if (!err && !dont_wait_for_ack) nl_set_extack_cookie_u64(info->extack, cookie); return err; out: wdev_unlock(wdev); return err; } static int nl80211_get_ftm_responder_stats(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_ftm_responder_stats ftm_stats = {}; unsigned int link_id = nl80211_link_id(info->attrs); struct sk_buff *msg; void *hdr; struct nlattr *ftm_stats_attr; int err; if (wdev->iftype != NL80211_IFTYPE_AP || !wdev->links[link_id].ap.beacon_interval) return -EOPNOTSUPP; err = rdev_get_ftm_responder_stats(rdev, dev, &ftm_stats); if (err) return err; if (!ftm_stats.filled) return -ENODATA; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_FTM_RESPONDER_STATS); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; ftm_stats_attr = nla_nest_start_noflag(msg, NL80211_ATTR_FTM_RESPONDER_STATS); if (!ftm_stats_attr) goto nla_put_failure; #define SET_FTM(field, name, type) \ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ nla_put_ ## type(msg, NL80211_FTM_STATS_ ## name, \ ftm_stats.field)) \ goto nla_put_failure; } while (0) #define SET_FTM_U64(field, name) \ do { if ((ftm_stats.filled & BIT(NL80211_FTM_STATS_ ## name)) && \ nla_put_u64_64bit(msg, NL80211_FTM_STATS_ ## name, \ ftm_stats.field, NL80211_FTM_STATS_PAD)) \ goto nla_put_failure; } while (0) SET_FTM(success_num, SUCCESS_NUM, u32); SET_FTM(partial_num, PARTIAL_NUM, u32); SET_FTM(failed_num, FAILED_NUM, u32); SET_FTM(asap_num, ASAP_NUM, u32); SET_FTM(non_asap_num, NON_ASAP_NUM, u32); SET_FTM_U64(total_duration_ms, TOTAL_DURATION_MSEC); SET_FTM(unknown_triggers_num, UNKNOWN_TRIGGERS_NUM, u32); SET_FTM(reschedule_requests_num, RESCHEDULE_REQUESTS_NUM, u32); SET_FTM(out_of_window_triggers_num, OUT_OF_WINDOW_TRIGGERS_NUM, u32); #undef SET_FTM nla_nest_end(msg, ftm_stats_attr); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_update_owe_info owe_info; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->update_owe_info) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_STATUS_CODE] || !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; memset(&owe_info, 0, sizeof(owe_info)); owe_info.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN); if (info->attrs[NL80211_ATTR_IE]) { owe_info.ie = nla_data(info->attrs[NL80211_ATTR_IE]); owe_info.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } return rdev_update_owe_info(rdev, dev, &owe_info); } static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct station_info sinfo = {}; const u8 *buf; size_t len; u8 *dest; int err; if (!rdev->ops->probe_mesh_link || !rdev->ops->get_station) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_FRAME]) { GENL_SET_ERR_MSG(info, "Frame or MAC missing"); return -EINVAL; } if (wdev->iftype != 
NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; dest = nla_data(info->attrs[NL80211_ATTR_MAC]); buf = nla_data(info->attrs[NL80211_ATTR_FRAME]); len = nla_len(info->attrs[NL80211_ATTR_FRAME]); if (len < sizeof(struct ethhdr)) return -EINVAL; if (!ether_addr_equal(buf, dest) || is_multicast_ether_addr(buf) || !ether_addr_equal(buf + ETH_ALEN, dev->dev_addr)) return -EINVAL; err = rdev_get_station(rdev, dev, dest, &sinfo); if (err) return err; cfg80211_sinfo_release_content(&sinfo); return rdev_probe_mesh_link(rdev, dev, dest, buf, len); } static int parse_tid_conf(struct cfg80211_registered_device *rdev, struct nlattr *attrs[], struct net_device *dev, struct cfg80211_tid_cfg *tid_conf, struct genl_info *info, const u8 *peer, unsigned int link_id) { struct netlink_ext_ack *extack = info->extack; u64 mask; int err; if (!attrs[NL80211_TID_CONFIG_ATTR_TIDS]) return -EINVAL; tid_conf->config_override = nla_get_flag(attrs[NL80211_TID_CONFIG_ATTR_OVERRIDE]); tid_conf->tids = nla_get_u16(attrs[NL80211_TID_CONFIG_ATTR_TIDS]); if (tid_conf->config_override) { if (rdev->ops->reset_tid_config) { err = rdev_reset_tid_config(rdev, dev, peer, tid_conf->tids); if (err) return err; } else { return -EINVAL; } } if (attrs[NL80211_TID_CONFIG_ATTR_NOACK]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_NOACK); tid_conf->noack = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_NOACK]); } if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT); tid_conf->retry_short = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]); if (tid_conf->retry_short > rdev->wiphy.max_data_retry_count) return -EINVAL; } if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG); tid_conf->retry_long = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]); if (tid_conf->retry_long > rdev->wiphy.max_data_retry_count) return -EINVAL; } if (attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL); tid_conf->ampdu = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]); } if (attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL); tid_conf->rtscts = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]); } if (attrs[NL80211_TID_CONFIG_ATTR_AMSDU_CTRL]) { tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL); tid_conf->amsdu = nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_AMSDU_CTRL]); } if (attrs[NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE]) { u32 idx = NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE, attr; tid_conf->txrate_type = nla_get_u8(attrs[idx]); if (tid_conf->txrate_type != NL80211_TX_RATE_AUTOMATIC) { attr = NL80211_TID_CONFIG_ATTR_TX_RATE; err = nl80211_parse_tx_bitrate_mask(info, attrs, attr, &tid_conf->txrate_mask, dev, true, link_id); if (err) return err; tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_TX_RATE); } tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE); } if (peer) mask = rdev->wiphy.tid_config_support.peer; else mask = rdev->wiphy.tid_config_support.vif; if (tid_conf->mask & ~mask) { NL_SET_ERR_MSG(extack, "unsupported TID configuration"); return -ENOTSUPP; } return 0; } static int nl80211_set_tid_config(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *attrs[NL80211_TID_CONFIG_ATTR_MAX + 1]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct cfg80211_tid_config *tid_config; struct nlattr *tid; int conf_idx = 0, 
rem_conf; int ret = -EINVAL; u32 num_conf = 0; if (!info->attrs[NL80211_ATTR_TID_CONFIG]) return -EINVAL; if (!rdev->ops->set_tid_config) return -EOPNOTSUPP; nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG], rem_conf) num_conf++; tid_config = kzalloc(struct_size(tid_config, tid_conf, num_conf), GFP_KERNEL); if (!tid_config) return -ENOMEM; tid_config->n_tid_conf = num_conf; if (info->attrs[NL80211_ATTR_MAC]) tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]); wdev_lock(dev->ieee80211_ptr); nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG], rem_conf) { ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX, tid, NULL, NULL); if (ret) goto bad_tid_conf; ret = parse_tid_conf(rdev, attrs, dev, &tid_config->tid_conf[conf_idx], info, tid_config->peer, link_id); if (ret) goto bad_tid_conf; conf_idx++; } ret = rdev_set_tid_config(rdev, dev, tid_config); bad_tid_conf: kfree(tid_config); wdev_unlock(dev->ieee80211_ptr); return ret; } static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct cfg80211_color_change_settings params = {}; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr **tb; u16 offset; int err; if (!rdev->ops->color_change) return -EOPNOTSUPP; if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_BSS_COLOR)) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_AP) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_COLOR_CHANGE_COUNT] || !info->attrs[NL80211_ATTR_COLOR_CHANGE_COLOR] || !info->attrs[NL80211_ATTR_COLOR_CHANGE_ELEMS]) return -EINVAL; params.count = nla_get_u8(info->attrs[NL80211_ATTR_COLOR_CHANGE_COUNT]); params.color = nla_get_u8(info->attrs[NL80211_ATTR_COLOR_CHANGE_COLOR]); err = nl80211_parse_beacon(rdev, info->attrs, &params.beacon_next, info->extack); if (err) return err; tb = kcalloc(NL80211_ATTR_MAX + 1, sizeof(*tb), GFP_KERNEL); if (!tb) return -ENOMEM; err = nla_parse_nested(tb, NL80211_ATTR_MAX, info->attrs[NL80211_ATTR_COLOR_CHANGE_ELEMS], nl80211_policy, info->extack); if (err) goto out; err = nl80211_parse_beacon(rdev, tb, &params.beacon_color_change, info->extack); if (err) goto out; if (!tb[NL80211_ATTR_CNTDWN_OFFS_BEACON]) { err = -EINVAL; goto out; } if (nla_len(tb[NL80211_ATTR_CNTDWN_OFFS_BEACON]) != sizeof(u16)) { err = -EINVAL; goto out; } offset = nla_get_u16(tb[NL80211_ATTR_CNTDWN_OFFS_BEACON]); if (offset >= params.beacon_color_change.tail_len) { err = -EINVAL; goto out; } if (params.beacon_color_change.tail[offset] != params.count) { err = -EINVAL; goto out; } params.counter_offset_beacon = offset; if (tb[NL80211_ATTR_CNTDWN_OFFS_PRESP]) { if (nla_len(tb[NL80211_ATTR_CNTDWN_OFFS_PRESP]) != sizeof(u16)) { err = -EINVAL; goto out; } offset = nla_get_u16(tb[NL80211_ATTR_CNTDWN_OFFS_PRESP]); if (offset >= params.beacon_color_change.probe_resp_len) { err = -EINVAL; goto out; } if (params.beacon_color_change.probe_resp[offset] != params.count) { err = -EINVAL; goto out; } params.counter_offset_presp = offset; } wdev_lock(wdev); err = rdev_color_change(rdev, dev, &params); wdev_unlock(wdev); out: kfree(params.beacon_next.mbssid_ies); kfree(params.beacon_color_change.mbssid_ies); kfree(params.beacon_next.rnr_ies); kfree(params.beacon_color_change.rnr_ies); kfree(tb); return err; } static int nl80211_set_fils_aad(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct 
cfg80211_fils_aad fils_aad = {}; u8 *nonces; if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_FILS_KEK] || !info->attrs[NL80211_ATTR_FILS_NONCES]) return -EINVAL; fils_aad.macaddr = nla_data(info->attrs[NL80211_ATTR_MAC]); fils_aad.kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]); fils_aad.kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]); nonces = nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]); fils_aad.snonce = nonces; fils_aad.anonce = nonces + FILS_NONCE_LEN; return rdev_set_fils_aad(rdev, dev, &fils_aad); } static int nl80211_add_link(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; int ret; if (!(wdev->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; switch (wdev->iftype) { case NL80211_IFTYPE_AP: break; default: return -EINVAL; } if (!info->attrs[NL80211_ATTR_MAC] || !is_valid_ether_addr(nla_data(info->attrs[NL80211_ATTR_MAC]))) return -EINVAL; wdev_lock(wdev); wdev->valid_links |= BIT(link_id); ether_addr_copy(wdev->links[link_id].addr, nla_data(info->attrs[NL80211_ATTR_MAC])); ret = rdev_add_intf_link(rdev, wdev, link_id); if (ret) { wdev->valid_links &= ~BIT(link_id); eth_zero_addr(wdev->links[link_id].addr); } wdev_unlock(wdev); return ret; } static int nl80211_remove_link(struct sk_buff *skb, struct genl_info *info) { unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; /* cannot remove if there's no link */ if (!info->attrs[NL80211_ATTR_MLO_LINK_ID]) return -EINVAL; switch (wdev->iftype) { case NL80211_IFTYPE_AP: break; default: return -EINVAL; } wdev_lock(wdev); cfg80211_remove_link(wdev, link_id); wdev_unlock(wdev); return 0; } static int nl80211_add_mod_link_station(struct sk_buff *skb, struct genl_info *info, bool add) { struct link_station_parameters params = {}; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; int err; if ((add && !rdev->ops->add_link_station) || (!add && !rdev->ops->mod_link_station)) return -EOPNOTSUPP; if (add && !info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MLD_ADDR]) return -EINVAL; if (add && !info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; params.mld_mac = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); if (info->attrs[NL80211_ATTR_MAC]) { params.link_mac = nla_data(info->attrs[NL80211_ATTR_MAC]); if (!is_valid_ether_addr(params.link_mac)) return -EINVAL; } if (!info->attrs[NL80211_ATTR_MLO_LINK_ID]) return -EINVAL; params.link_id = nla_get_u8(info->attrs[NL80211_ATTR_MLO_LINK_ID]); if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); } if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) params.vht_capa = nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); if (info->attrs[NL80211_ATTR_HE_CAPABILITY]) { params.he_capa = nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]); params.he_capa_len = nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]); if (info->attrs[NL80211_ATTR_EHT_CAPABILITY]) { params.eht_capa = nla_data(info->attrs[NL80211_ATTR_EHT_CAPABILITY]); 
			params.eht_capa_len =
				nla_len(info->attrs[NL80211_ATTR_EHT_CAPABILITY]);
			if (!ieee80211_eht_capa_size_ok((const u8 *)params.he_capa,
							(const u8 *)params.eht_capa,
							params.eht_capa_len,
							false))
				return -EINVAL;
		}
	}

	if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
		params.he_6ghz_capa =
			nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);

	if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
		params.opmode_notif_used = true;
		params.opmode_notif =
			nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
	}

	err = nl80211_parse_sta_txpower_setting(info, &params.txpwr,
						&params.txpwr_set);
	if (err)
		return err;

	wdev_lock(dev->ieee80211_ptr);
	if (add)
		err = rdev_add_link_station(rdev, dev, &params);
	else
		err = rdev_mod_link_station(rdev, dev, &params);
	wdev_unlock(dev->ieee80211_ptr);

	return err;
}

static int nl80211_add_link_station(struct sk_buff *skb,
				    struct genl_info *info)
{
	return nl80211_add_mod_link_station(skb, info, true);
}

static int nl80211_modify_link_station(struct sk_buff *skb,
				       struct genl_info *info)
{
	return nl80211_add_mod_link_station(skb, info, false);
}

static int nl80211_remove_link_station(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct link_station_del_parameters params = {};
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	int ret;

	if (!rdev->ops->del_link_station)
		return -EOPNOTSUPP;

	if (!info->attrs[NL80211_ATTR_MLD_ADDR] ||
	    !info->attrs[NL80211_ATTR_MLO_LINK_ID])
		return -EINVAL;

	params.mld_mac = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
	params.link_id = nla_get_u8(info->attrs[NL80211_ATTR_MLO_LINK_ID]);

	wdev_lock(dev->ieee80211_ptr);
	ret = rdev_del_link_station(rdev, dev, &params);
	wdev_unlock(dev->ieee80211_ptr);

	return ret;
}

static int nl80211_set_hw_timestamp(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct cfg80211_set_hw_timestamp hwts = {};

	if (!rdev->wiphy.hw_timestamp_max_peers)
		return -EOPNOTSUPP;

	if (!info->attrs[NL80211_ATTR_MAC] &&
	    rdev->wiphy.hw_timestamp_max_peers != CFG80211_HW_TIMESTAMP_ALL_PEERS)
		return -EOPNOTSUPP;

	if (info->attrs[NL80211_ATTR_MAC])
		hwts.macaddr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	hwts.enable =
		nla_get_flag(info->attrs[NL80211_ATTR_HW_TIMESTAMP_ENABLED]);

	return rdev_set_hw_timestamp(rdev, dev, &hwts);
}

#define NL80211_FLAG_NEED_WIPHY		0x01
#define NL80211_FLAG_NEED_NETDEV	0x02
#define NL80211_FLAG_NEED_RTNL		0x04
#define NL80211_FLAG_CHECK_NETDEV_UP	0x08
#define NL80211_FLAG_NEED_NETDEV_UP	(NL80211_FLAG_NEED_NETDEV |\
					 NL80211_FLAG_CHECK_NETDEV_UP)
#define NL80211_FLAG_NEED_WDEV		0x10
/* If a netdev is associated, it must be UP, P2P must be started */
#define NL80211_FLAG_NEED_WDEV_UP	(NL80211_FLAG_NEED_WDEV |\
					 NL80211_FLAG_CHECK_NETDEV_UP)
#define NL80211_FLAG_CLEAR_SKB		0x20
#define NL80211_FLAG_NO_WIPHY_MTX	0x40
#define NL80211_FLAG_MLO_VALID_LINK_ID	0x80
#define NL80211_FLAG_MLO_UNSUPPORTED	0x100

#define INTERNAL_FLAG_SELECTORS(__sel)			\
	SELECTOR(__sel, NONE, 0) /* must be first */	\
	SELECTOR(__sel, WIPHY,				\
		 NL80211_FLAG_NEED_WIPHY)		\
	SELECTOR(__sel, WDEV,				\
		 NL80211_FLAG_NEED_WDEV)		\
	SELECTOR(__sel, NETDEV,				\
		 NL80211_FLAG_NEED_NETDEV)		\
	SELECTOR(__sel, NETDEV_LINK,			\
		 NL80211_FLAG_NEED_NETDEV |		\
		 NL80211_FLAG_MLO_VALID_LINK_ID)	\
	SELECTOR(__sel, NETDEV_NO_MLO,			\
		 NL80211_FLAG_NEED_NETDEV |		\
		 NL80211_FLAG_MLO_UNSUPPORTED)		\
	SELECTOR(__sel, WIPHY_RTNL,			\
		 NL80211_FLAG_NEED_WIPHY |		\
		 NL80211_FLAG_NEED_RTNL)		\
	SELECTOR(__sel, WIPHY_RTNL_NOMTX,		\
		 NL80211_FLAG_NEED_WIPHY |		\
NL80211_FLAG_NEED_RTNL | \ NL80211_FLAG_NO_WIPHY_MTX) \ SELECTOR(__sel, WDEV_RTNL, \ NL80211_FLAG_NEED_WDEV | \ NL80211_FLAG_NEED_RTNL) \ SELECTOR(__sel, NETDEV_RTNL, \ NL80211_FLAG_NEED_NETDEV | \ NL80211_FLAG_NEED_RTNL) \ SELECTOR(__sel, NETDEV_UP, \ NL80211_FLAG_NEED_NETDEV_UP) \ SELECTOR(__sel, NETDEV_UP_LINK, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_MLO_VALID_LINK_ID) \ SELECTOR(__sel, NETDEV_UP_NO_MLO, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_MLO_UNSUPPORTED) \ SELECTOR(__sel, NETDEV_UP_NO_MLO_CLEAR, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_CLEAR_SKB | \ NL80211_FLAG_MLO_UNSUPPORTED) \ SELECTOR(__sel, NETDEV_UP_NOTMX, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_NO_WIPHY_MTX) \ SELECTOR(__sel, NETDEV_UP_NOTMX_NOMLO, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_NO_WIPHY_MTX | \ NL80211_FLAG_MLO_UNSUPPORTED) \ SELECTOR(__sel, NETDEV_UP_CLEAR, \ NL80211_FLAG_NEED_NETDEV_UP | \ NL80211_FLAG_CLEAR_SKB) \ SELECTOR(__sel, WDEV_UP, \ NL80211_FLAG_NEED_WDEV_UP) \ SELECTOR(__sel, WDEV_UP_LINK, \ NL80211_FLAG_NEED_WDEV_UP | \ NL80211_FLAG_MLO_VALID_LINK_ID) \ SELECTOR(__sel, WDEV_UP_RTNL, \ NL80211_FLAG_NEED_WDEV_UP | \ NL80211_FLAG_NEED_RTNL) \ SELECTOR(__sel, WIPHY_CLEAR, \ NL80211_FLAG_NEED_WIPHY | \ NL80211_FLAG_CLEAR_SKB) enum nl80211_internal_flags_selector { #define SELECTOR(_, name, value) NL80211_IFL_SEL_##name, INTERNAL_FLAG_SELECTORS(_) #undef SELECTOR }; static u32 nl80211_internal_flags[] = { #define SELECTOR(_, name, value) [NL80211_IFL_SEL_##name] = value, INTERNAL_FLAG_SELECTORS(_) #undef SELECTOR }; static int nl80211_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = NULL; struct wireless_dev *wdev = NULL; struct net_device *dev = NULL; u32 internal_flags; int err; if (WARN_ON(ops->internal_flags >= ARRAY_SIZE(nl80211_internal_flags))) return -EINVAL; internal_flags = nl80211_internal_flags[ops->internal_flags]; rtnl_lock(); if (internal_flags & NL80211_FLAG_NEED_WIPHY) { rdev = cfg80211_get_dev_from_info(genl_info_net(info), info); if (IS_ERR(rdev)) { err = PTR_ERR(rdev); goto out_unlock; } info->user_ptr[0] = rdev; } else if (internal_flags & NL80211_FLAG_NEED_NETDEV || internal_flags & NL80211_FLAG_NEED_WDEV) { wdev = __cfg80211_wdev_from_attrs(NULL, genl_info_net(info), info->attrs); if (IS_ERR(wdev)) { err = PTR_ERR(wdev); goto out_unlock; } dev = wdev->netdev; dev_hold(dev); rdev = wiphy_to_rdev(wdev->wiphy); if (internal_flags & NL80211_FLAG_NEED_NETDEV) { if (!dev) { err = -EINVAL; goto out_unlock; } info->user_ptr[1] = dev; } else { info->user_ptr[1] = wdev; } if (internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !wdev_running(wdev)) { err = -ENETDOWN; goto out_unlock; } info->user_ptr[0] = rdev; } if (internal_flags & NL80211_FLAG_MLO_VALID_LINK_ID) { struct nlattr *link_id = info->attrs[NL80211_ATTR_MLO_LINK_ID]; if (!wdev) { err = -EINVAL; goto out_unlock; } /* MLO -> require valid link ID */ if (wdev->valid_links && (!link_id || !(wdev->valid_links & BIT(nla_get_u8(link_id))))) { err = -EINVAL; goto out_unlock; } /* non-MLO -> no link ID attribute accepted */ if (!wdev->valid_links && link_id) { err = -EINVAL; goto out_unlock; } } if (internal_flags & NL80211_FLAG_MLO_UNSUPPORTED) { if (info->attrs[NL80211_ATTR_MLO_LINK_ID] || (wdev && wdev->valid_links)) { err = -EINVAL; goto out_unlock; } } if (rdev && !(internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) { wiphy_lock(&rdev->wiphy); /* we keep the mutex locked until post_doit */ __release(&rdev->wiphy.mtx); } if 
(!(internal_flags & NL80211_FLAG_NEED_RTNL)) rtnl_unlock(); return 0; out_unlock: rtnl_unlock(); dev_put(dev); return err; } static void nl80211_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { u32 internal_flags = nl80211_internal_flags[ops->internal_flags]; if (info->user_ptr[1]) { if (internal_flags & NL80211_FLAG_NEED_WDEV) { struct wireless_dev *wdev = info->user_ptr[1]; dev_put(wdev->netdev); } else { dev_put(info->user_ptr[1]); } } if (info->user_ptr[0] && !(internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; /* we kept the mutex locked since pre_doit */ __acquire(&rdev->wiphy.mtx); wiphy_unlock(&rdev->wiphy); } if (internal_flags & NL80211_FLAG_NEED_RTNL) rtnl_unlock(); /* If needed, clear the netlink message payload from the SKB * as it might contain key data that shouldn't stick around on * the heap after the SKB is freed. The netlink message header * is still needed for further processing, so leave it intact. */ if (internal_flags & NL80211_FLAG_CLEAR_SKB) { struct nlmsghdr *nlh = nlmsg_hdr(skb); memset(nlmsg_data(nlh), 0, nlmsg_len(nlh)); } } static int nl80211_set_sar_sub_specs(struct cfg80211_registered_device *rdev, struct cfg80211_sar_specs *sar_specs, struct nlattr *spec[], int index) { u32 range_index, i; if (!sar_specs || !spec) return -EINVAL; if (!spec[NL80211_SAR_ATTR_SPECS_POWER] || !spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX]) return -EINVAL; range_index = nla_get_u32(spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX]); /* check if range_index exceeds num_freq_ranges */ if (range_index >= rdev->wiphy.sar_capa->num_freq_ranges) return -EINVAL; /* check if range_index duplicates */ for (i = 0; i < index; i++) { if (sar_specs->sub_specs[i].freq_range_index == range_index) return -EINVAL; } sar_specs->sub_specs[index].power = nla_get_s32(spec[NL80211_SAR_ATTR_SPECS_POWER]); sar_specs->sub_specs[index].freq_range_index = range_index; return 0; } static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct nlattr *spec[NL80211_SAR_ATTR_SPECS_MAX + 1]; struct nlattr *tb[NL80211_SAR_ATTR_MAX + 1]; struct cfg80211_sar_specs *sar_spec; enum nl80211_sar_type type; struct nlattr *spec_list; u32 specs; int rem, err; if (!rdev->wiphy.sar_capa || !rdev->ops->set_sar_specs) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_SAR_SPEC]) return -EINVAL; nla_parse_nested(tb, NL80211_SAR_ATTR_MAX, info->attrs[NL80211_ATTR_SAR_SPEC], NULL, NULL); if (!tb[NL80211_SAR_ATTR_TYPE] || !tb[NL80211_SAR_ATTR_SPECS]) return -EINVAL; type = nla_get_u32(tb[NL80211_SAR_ATTR_TYPE]); if (type != rdev->wiphy.sar_capa->type) return -EINVAL; specs = 0; nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) specs++; if (specs > rdev->wiphy.sar_capa->num_freq_ranges) return -EINVAL; sar_spec = kzalloc(struct_size(sar_spec, sub_specs, specs), GFP_KERNEL); if (!sar_spec) return -ENOMEM; sar_spec->type = type; specs = 0; nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) { nla_parse_nested(spec, NL80211_SAR_ATTR_SPECS_MAX, spec_list, NULL, NULL); switch (type) { case NL80211_SAR_TYPE_POWER: if (nl80211_set_sar_sub_specs(rdev, sar_spec, spec, specs)) { err = -EINVAL; goto error; } break; default: err = -EINVAL; goto error; } specs++; } sar_spec->num_sub_specs = specs; rdev->cur_cmd_info = info; err = rdev_set_sar_specs(rdev, sar_spec); rdev->cur_cmd_info = NULL; error: kfree(sar_spec); return err; } #define 
SELECTOR(__sel, name, value) \ ((__sel) == (value)) ? NL80211_IFL_SEL_##name : int __missing_selector(void); #define IFLAGS(__val) INTERNAL_FLAG_SELECTORS(__val) __missing_selector() static const struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_wiphy, .dumpit = nl80211_dump_wiphy, .done = nl80211_dump_wiphy_done, /* can be retrieved by unprivileged users */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, }; static const struct genl_small_ops nl80211_small_ops[] = { { .cmd = NL80211_CMD_SET_WIPHY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_wiphy, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_INTERFACE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_interface, .dumpit = nl80211_dump_interface, /* can be retrieved by unprivileged users */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV), }, { .cmd = NL80211_CMD_SET_INTERFACE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_NEW_INTERFACE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_new_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL | /* we take the wiphy mutex later ourselves */ NL80211_FLAG_NO_WIPHY_MTX), }, { .cmd = NL80211_CMD_DEL_INTERFACE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_interface, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_GET_KEY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_KEY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_key, .flags = GENL_UNS_ADMIN_PERM, /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID, depends on key */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_NEW_KEY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_new_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_DEL_KEY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_key, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_BEACON, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_set_beacon, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_START_AP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_start_ap, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_STOP_AP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_stop_ap, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_GET_STATION, .validate = GENL_DONT_VALIDATE_STRICT | 
GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_station, .dumpit = nl80211_dump_station, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_SET_STATION, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_NEW_STATION, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_new_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_DEL_STATION, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_GET_MPATH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_GET_MPP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_mpp, .dumpit = nl80211_dump_mpp, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_MPATH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_NEW_MPATH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_new_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_DEL_MPATH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_mpath, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_BSS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_bss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_GET_REG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_reg_do, .dumpit = nl80211_get_reg_dump, /* can be retrieved by unprivileged users */ }, #ifdef CONFIG_CFG80211_CRDA_SUPPORT { .cmd = NL80211_CMD_SET_REG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_reg, .flags = GENL_ADMIN_PERM, }, #endif { .cmd = NL80211_CMD_REQ_SET_REG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_req_set_reg, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_RELOAD_REGDB, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_reload_regdb, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_MESH_CONFIG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_mesh_config, /* can be retrieved by unprivileged users */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_MESH_CONFIG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_update_mesh_config, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_TRIGGER_SCAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_trigger_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = 
NL80211_CMD_ABORT_SCAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_abort_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_GET_SCAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = nl80211_dump_scan, }, { .cmd = NL80211_CMD_START_SCHED_SCAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_start_sched_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_STOP_SCHED_SCAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_stop_sched_scan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_AUTHENTICATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_authenticate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_ASSOCIATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_associate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_DEAUTHENTICATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_deauthenticate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_DISASSOCIATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_disassociate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_JOIN_IBSS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_join_ibss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_LEAVE_IBSS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_leave_ibss, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, #ifdef CONFIG_NL80211_TESTMODE { .cmd = NL80211_CMD_TESTMODE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_testmode_do, .dumpit = nl80211_testmode_dump, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, #endif { .cmd = NL80211_CMD_CONNECT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_connect, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_update_connect_params, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_DISCONNECT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_disconnect, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_WIPHY_NETNS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_wiphy_netns, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_NO_WIPHY_MTX), }, { .cmd = NL80211_CMD_GET_SURVEY, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = nl80211_dump_survey, }, { .cmd = NL80211_CMD_SET_PMKSA, .validate 
= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_setdel_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_DEL_PMKSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_setdel_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_FLUSH_PMKSA, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_flush_pmksa, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_remain_on_channel, .flags = GENL_UNS_ADMIN_PERM, /* FIXME: requiring a link ID here is probably not good */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_cancel_remain_on_channel, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_tx_bitrate_mask, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_REGISTER_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_register_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV), }, { .cmd = NL80211_CMD_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tx_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_FRAME_WAIT_CANCEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tx_mgmt_cancel_wait, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_SET_POWER_SAVE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_power_save, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_GET_POWER_SAVE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_power_save, /* can be retrieved by unprivileged users */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_SET_CQM, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_cqm, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_SET_CHANNEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_channel, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_JOIN_MESH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_join_mesh, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_LEAVE_MESH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_leave_mesh, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_JOIN_OCB, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_join_ocb, 
.flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_LEAVE_OCB, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_leave_ocb, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, #ifdef CONFIG_PM { .cmd = NL80211_CMD_GET_WOWLAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_wowlan, /* can be retrieved by unprivileged users */ .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, { .cmd = NL80211_CMD_SET_WOWLAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_wowlan, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, #endif { .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_rekey_data, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_TDLS_MGMT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tdls_mgmt, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_TDLS_OPER, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tdls_oper, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_UNEXPECTED_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_register_unexpected_frame, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_PROBE_CLIENT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_probe_client, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_REGISTER_BEACONS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_register_beacons, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, { .cmd = NL80211_CMD_SET_NOACK_MAP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_noack_map, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_START_P2P_DEVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_start_p2p_device, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_STOP_P2P_DEVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_stop_p2p_device, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_START_NAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_start_nan, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_STOP_NAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_stop_nan, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_ADD_NAN_FUNCTION, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_nan_add_func, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = 
NL80211_CMD_DEL_NAN_FUNCTION, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_nan_del_func, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_CHANGE_NAN_CONFIG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_nan_change_config, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_SET_MCAST_RATE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_mcast_rate, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_SET_MAC_ACL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_mac_acl, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_MLO_UNSUPPORTED), }, { .cmd = NL80211_CMD_RADAR_DETECT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_start_radar_detection, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NO_WIPHY_MTX | NL80211_FLAG_MLO_UNSUPPORTED), }, { .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_protocol_features, }, { .cmd = NL80211_CMD_UPDATE_FT_IES, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_update_ft_ies, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_CRIT_PROTOCOL_START, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_crit_protocol_start, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_crit_protocol_stop, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_GET_COALESCE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_coalesce, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, { .cmd = NL80211_CMD_SET_COALESCE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_coalesce, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY), }, { .cmd = NL80211_CMD_CHANNEL_SWITCH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_VENDOR, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_vendor_cmd, .dumpit = nl80211_vendor_cmd_dump, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_SET_QOS_MAP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_qos_map, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_ADD_TX_TS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_add_tx_ts, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_UNSUPPORTED), }, { .cmd = NL80211_CMD_DEL_TX_TS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_tx_ts, .flags = GENL_UNS_ADMIN_PERM, .internal_flags 
= IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tdls_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tdls_cancel_channel_switch, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_multicast_to_unicast, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV), }, { .cmd = NL80211_CMD_SET_PMK, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_pmk, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_CLEAR_SKB), }, { .cmd = NL80211_CMD_DEL_PMK, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_del_pmk, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_EXTERNAL_AUTH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_external_auth, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_CONTROL_PORT_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tx_control_port, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_get_ftm_responder_stats, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_PEER_MEASUREMENT_START, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_pmsr_start, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP), }, { .cmd = NL80211_CMD_NOTIFY_RADAR, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_notify_radar_detection, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_UPDATE_OWE_INFO, .doit = nl80211_update_owe_info, .flags = GENL_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_PROBE_MESH_LINK, .doit = nl80211_probe_mesh_link, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_TID_CONFIG, .doit = nl80211_set_tid_config, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_SET_SAR_SPECS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_sar_specs, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL), }, { .cmd = NL80211_CMD_COLOR_CHANGE_REQUEST, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_color_change, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_SET_FILS_AAD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_set_fils_aad, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_ADD_LINK, .doit = nl80211_add_link, .flags = GENL_UNS_ADMIN_PERM, 
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, { .cmd = NL80211_CMD_REMOVE_LINK, .doit = nl80211_remove_link, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_ADD_LINK_STA, .doit = nl80211_add_link_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_MODIFY_LINK_STA, .doit = nl80211_modify_link_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_REMOVE_LINK_STA, .doit = nl80211_remove_link_station, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_SET_HW_TIMESTAMP, .doit = nl80211_set_hw_timestamp, .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, }; static struct genl_family nl80211_fam __ro_after_init = { .name = NL80211_GENL_NAME, /* have users key off the name instead */ .hdrsize = 0, /* no private header */ .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .policy = nl80211_policy, .netnsok = true, .pre_doit = nl80211_pre_doit, .post_doit = nl80211_post_doit, .module = THIS_MODULE, .ops = nl80211_ops, .n_ops = ARRAY_SIZE(nl80211_ops), .small_ops = nl80211_small_ops, .n_small_ops = ARRAY_SIZE(nl80211_small_ops), .resv_start_op = NL80211_CMD_REMOVE_LINK_STA + 1, .mcgrps = nl80211_mcgrps, .n_mcgrps = ARRAY_SIZE(nl80211_mcgrps), .parallel_ops = true, }; /* notification functions */ void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev, enum nl80211_commands cmd) { struct sk_buff *msg; struct nl80211_dump_wiphy_state state = {}; WARN_ON(cmd != NL80211_CMD_NEW_WIPHY && cmd != NL80211_CMD_DEL_WIPHY); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_wiphy(rdev, cmd, msg, 0, 0, 0, &state) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_CONFIG, GFP_KERNEL); } void nl80211_notify_iface(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_commands cmd) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, cmd) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_CONFIG, GFP_KERNEL); } static int nl80211_add_scan_req(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct cfg80211_scan_request *req = rdev->scan_req; struct nlattr *nest; int i; struct cfg80211_scan_info *info; if (WARN_ON(!req)) return 0; nest = nla_nest_start_noflag(msg, NL80211_ATTR_SCAN_SSIDS); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_ssids; i++) { if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid)) goto nla_put_failure; } nla_nest_end(msg, nest); if (req->flags & NL80211_SCAN_FLAG_FREQ_KHZ) { nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQ_KHZ); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_channels; i++) { if (nla_put_u32(msg, i, ieee80211_channel_to_khz(req->channels[i]))) goto nla_put_failure; } nla_nest_end(msg, nest); } else { nest = nla_nest_start_noflag(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_channels; i++) { if (nla_put_u32(msg, i, req->channels[i]->center_freq)) goto 
nla_put_failure; } nla_nest_end(msg, nest); } if (req->ie && nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie)) goto nla_put_failure; if (req->flags && nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) goto nla_put_failure; info = rdev->int_scan_req ? &rdev->int_scan_req->info : &rdev->scan_req->info; if (info->scan_start_tsf && (nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF, info->scan_start_tsf, NL80211_BSS_PAD) || nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN, info->tsf_bssid))) goto nla_put_failure; return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_prep_scan_msg(struct sk_buff *msg, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 portid, u32 seq, int flags, u32 cmd) { void *hdr; hdr = nl80211hdr_put(msg, portid, seq, flags, cmd); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_prep_sched_scan_msg(struct sk_buff *msg, struct cfg80211_sched_scan_request *req, u32 cmd) { void *hdr; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) return -1; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, wiphy_to_rdev(req->wiphy)->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, req->dev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->reqid, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, NL80211_CMD_TRIGGER_SCAN) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_SCAN, GFP_KERNEL); } struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, bool aborted) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return NULL; if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0, aborted ? 
				 NL80211_CMD_SCAN_ABORTED :
				 NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
		nlmsg_free(msg);
		return NULL;
	}

	return msg;
}

/* send message created by nl80211_build_scan_msg() */
void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
			   struct sk_buff *msg)
{
	if (!msg)
		return;

	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
				NL80211_MCGRP_SCAN, GFP_KERNEL);
}

void nl80211_send_sched_scan(struct cfg80211_sched_scan_request *req, u32 cmd)
{
	struct sk_buff *msg;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return;

	if (nl80211_prep_sched_scan_msg(msg, req, cmd) < 0) {
		nlmsg_free(msg);
		return;
	}

	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(req->wiphy), msg, 0,
				NL80211_MCGRP_SCAN, GFP_KERNEL);
}

static bool nl80211_reg_change_event_fill(struct sk_buff *msg,
					  struct regulatory_request *request)
{
	/* Userspace can always count on this one being set */
	if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
		goto nla_put_failure;

	if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
			       NL80211_REGDOM_TYPE_WORLD))
			goto nla_put_failure;
	} else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
			       NL80211_REGDOM_TYPE_CUSTOM_WORLD))
			goto nla_put_failure;
	} else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
		   request->intersect) {
		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
			       NL80211_REGDOM_TYPE_INTERSECTION))
			goto nla_put_failure;
	} else {
		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
			       NL80211_REGDOM_TYPE_COUNTRY) ||
		    nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
				   request->alpha2))
			goto nla_put_failure;
	}

	if (request->wiphy_idx != WIPHY_IDX_INVALID) {
		struct wiphy *wiphy = wiphy_idx_to_wiphy(request->wiphy_idx);

		if (wiphy &&
		    nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
			goto nla_put_failure;

		if (wiphy &&
		    wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
		    nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG))
			goto nla_put_failure;
	}

	return true;

nla_put_failure:
	return false;
}

/*
 * This can happen on global regulatory changes or device-specific settings
 * based on custom regulatory domains.
*/ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id, struct regulatory_request *request) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd_id); if (!hdr) goto nla_put_failure; if (!nl80211_reg_change_event_fill(msg, request)) goto nla_put_failure; genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(&nl80211_fam, msg, 0, NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: nlmsg_free(msg); } static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, enum nl80211_commands cmd, gfp_t gfp, int uapsd_queues, const u8 *req_ies, size_t req_ies_len, bool reconnect) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + len + req_ies_len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_FRAME, len, buf) || (req_ies && nla_put(msg, NL80211_ATTR_REQ_IE, req_ies_len, req_ies))) goto nla_put_failure; if (reconnect && nla_put_flag(msg, NL80211_ATTR_RECONNECT_REQUESTED)) goto nla_put_failure; if (uapsd_queues >= 0) { struct nlattr *nla_wmm = nla_nest_start_noflag(msg, NL80211_ATTR_STA_WME); if (!nla_wmm) goto nla_put_failure; if (nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES, uapsd_queues)) goto nla_put_failure; nla_nest_end(msg, nla_wmm); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0, false); } void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_rx_assoc_resp *data) { nl80211_send_mlme_event(rdev, netdev, data->buf, data->len, NL80211_CMD_ASSOCIATE, GFP_KERNEL, data->uapsd_queues, data->req_ies, data->req_ies_len, false); } void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, bool reconnect, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0, reconnect); } void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, bool reconnect, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0, reconnect); } void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct ieee80211_mgmt *mgmt = (void *)buf; u32 cmd; if (WARN_ON(len < 2)) return; if (ieee80211_is_deauth(mgmt->frame_control)) { cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE; } else if (ieee80211_is_disassoc(mgmt->frame_control)) { cmd = NL80211_CMD_UNPROT_DISASSOCIATE; } else if (ieee80211_is_beacon(mgmt->frame_control)) { if (wdev->unprot_beacon_reported && elapsed_jiffies_msecs(wdev->unprot_beacon_reported) < 10000) return; cmd = NL80211_CMD_UNPROT_BEACON; wdev->unprot_beacon_reported = jiffies; } else { return; } 
trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1, NULL, 0, false); } EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, int cmd, const u8 *addr, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, addr, gfp); } void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr, gfp); } void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_connect_resp_params *cr, gfp_t gfp) { struct sk_buff *msg; void *hdr; unsigned int link; size_t link_info_size = 0; const u8 *connected_addr = cr->valid_links ? cr->ap_mld_addr : cr->links[0].bssid; if (cr->valid_links) { for_each_valid_link(cr, link) { /* Nested attribute header */ link_info_size += NLA_HDRLEN; /* Link ID */ link_info_size += nla_total_size(sizeof(u8)); link_info_size += cr->links[link].addr ? nla_total_size(ETH_ALEN) : 0; link_info_size += (cr->links[link].bssid || cr->links[link].bss) ? nla_total_size(ETH_ALEN) : 0; link_info_size += nla_total_size(sizeof(u16)); } } msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len + cr->fils.kek_len + cr->fils.pmk_len + (cr->fils.pmkid ? WLAN_PMKID_LEN : 0) + link_info_size, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (connected_addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, connected_addr)) || nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, cr->status < 0 ? 
WLAN_STATUS_UNSPECIFIED_FAILURE : cr->status) || (cr->status < 0 && (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) || nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, cr->timeout_reason))) || (cr->req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) || (cr->resp_ie && nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len, cr->resp_ie)) || (cr->fils.update_erp_next_seq_num && nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, cr->fils.erp_next_seq_num)) || (cr->status == WLAN_STATUS_SUCCESS && ((cr->fils.kek && nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils.kek_len, cr->fils.kek)) || (cr->fils.pmk && nla_put(msg, NL80211_ATTR_PMK, cr->fils.pmk_len, cr->fils.pmk)) || (cr->fils.pmkid && nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->fils.pmkid))))) goto nla_put_failure; if (cr->valid_links) { int i = 1; struct nlattr *nested; nested = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); if (!nested) goto nla_put_failure; for_each_valid_link(cr, link) { struct nlattr *nested_mlo_links; const u8 *bssid = cr->links[link].bss ? cr->links[link].bss->bssid : cr->links[link].bssid; nested_mlo_links = nla_nest_start(msg, i); if (!nested_mlo_links) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link) || (bssid && nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, bssid)) || (cr->links[link].addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->links[link].addr)) || nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, cr->links[link].status)) goto nla_put_failure; nla_nest_end(msg, nested_mlo_links); i++; } nla_nest_end(msg, nested); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_roam_info *info, gfp_t gfp) { struct sk_buff *msg; void *hdr; size_t link_info_size = 0; unsigned int link; const u8 *connected_addr = info->ap_mld_addr ? info->ap_mld_addr : (info->links[0].bss ? info->links[0].bss->bssid : info->links[0].bssid); if (info->valid_links) { for_each_valid_link(info, link) { /* Nested attribute header */ link_info_size += NLA_HDRLEN; /* Link ID */ link_info_size += nla_total_size(sizeof(u8)); link_info_size += info->links[link].addr ? nla_total_size(ETH_ALEN) : 0; link_info_size += (info->links[link].bssid || info->links[link].bss) ? nla_total_size(ETH_ALEN) : 0; } } msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len + info->fils.kek_len + info->fils.pmk_len + (info->fils.pmkid ? 
WLAN_PMKID_LEN : 0) + link_info_size, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, connected_addr) || (info->req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len, info->req_ie)) || (info->resp_ie && nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len, info->resp_ie)) || (info->fils.update_erp_next_seq_num && nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM, info->fils.erp_next_seq_num)) || (info->fils.kek && nla_put(msg, NL80211_ATTR_FILS_KEK, info->fils.kek_len, info->fils.kek)) || (info->fils.pmk && nla_put(msg, NL80211_ATTR_PMK, info->fils.pmk_len, info->fils.pmk)) || (info->fils.pmkid && nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid))) goto nla_put_failure; if (info->valid_links) { int i = 1; struct nlattr *nested; nested = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); if (!nested) goto nla_put_failure; for_each_valid_link(info, link) { struct nlattr *nested_mlo_links; const u8 *bssid = info->links[link].bss ? info->links[link].bss->bssid : info->links[link].bssid; nested_mlo_links = nla_nest_start(msg, i); if (!nested_mlo_links) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link) || (bssid && nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, bssid)) || (info->links[link].addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, info->links[link].addr))) goto nla_put_failure; nla_nest_end(msg, nested_mlo_links); i++; } nla_nest_end(msg, nested); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *td_bitmap, u8 td_bitmap_len) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PORT_AUTHORIZED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; if ((td_bitmap_len > 0) && td_bitmap) if (nla_put(msg, NL80211_ATTR_TD_BITMAP, td_bitmap_len, td_bitmap)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + ie_len, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (reason && nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || (from_ap && nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_links_removed(struct net_device 
*dev, u16 link_mask) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; struct nlattr *links; void *hdr; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_links_removed(dev, link_mask); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; if (WARN_ON(!wdev->valid_links || !link_mask || (wdev->valid_links & link_mask) != link_mask || wdev->valid_links == link_mask)) return; cfg80211_wdev_release_link_bsses(wdev, link_mask); wdev->valid_links &= ~link_mask; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_LINKS_REMOVED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); if (!links) goto nla_put_failure; while (link_mask) { struct nlattr *link; int link_id = __ffs(link_mask); link = nla_nest_start(msg, link_id + 1); if (!link) goto nla_put_failure; if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) goto nla_put_failure; nla_nest_end(msg, link); link_mask &= ~(1 << link_id); } nla_nest_end(msg, links); genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_links_removed); void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr, const u8 *ie, u8 ie_len, int sig_dbm, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT)) return; trace_cfg80211_notify_new_peer_candidate(dev, addr); msg = nlmsg_new(100 + ie_len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || (ie_len && ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate); void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, 
gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) || nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) || (key_id != -1 && nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) || (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void nl80211_send_beacon_hint_event(struct wiphy *wiphy, struct ieee80211_channel *channel_before, struct ieee80211_channel *channel_after) { struct sk_buff *msg; void *hdr; struct nlattr *nl_freq; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT); if (!hdr) { nlmsg_free(msg); return; } /* * Since we are applying the beacon hint to a wiphy we know its * wiphy_idx is valid */ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy))) goto nla_put_failure; /* Before */ nl_freq = nla_nest_start_noflag(msg, NL80211_ATTR_FREQ_BEFORE); if (!nl_freq) goto nla_put_failure; if (nl80211_msg_put_channel(msg, wiphy, channel_before, false)) goto nla_put_failure; nla_nest_end(msg, nl_freq); /* After */ nl_freq = nla_nest_start_noflag(msg, NL80211_ATTR_FREQ_AFTER); if (!nl_freq) goto nla_put_failure; if (nl80211_msg_put_channel(msg, wiphy, channel_after, false)) goto nla_put_failure; nla_nest_end(msg, nl_freq); genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(&nl80211_fam, msg, 0, NL80211_MCGRP_REGULATORY, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: nlmsg_free(msg); } static void nl80211_send_remain_on_chan_event( int cmd, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) || nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, NL80211_CHAN_NO_HT) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD)) goto nla_put_failure; if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL && nla_put_u32(msg, NL80211_ATTR_DURATION, duration)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_assoc_comeback(struct net_device *netdev, const u8 *ap_addr, u32 timeout) { struct wireless_dev *wdev = netdev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_assoc_comeback(wdev, ap_addr, timeout); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ASSOC_COMEBACK); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, 
NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ap_addr) || nla_put_u32(msg, NL80211_ATTR_TIMEOUT, timeout)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_assoc_comeback); void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration); nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, rdev, wdev, cookie, chan, duration, gfp); } EXPORT_SYMBOL(cfg80211_ready_on_channel); void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan); nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, rdev, wdev, cookie, chan, 0, gfp); } EXPORT_SYMBOL(cfg80211_remain_on_channel_expired); void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, gfp_t gfp) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_tx_mgmt_expired(wdev, cookie, chan); nl80211_send_remain_on_chan_event(NL80211_CMD_FRAME_WAIT_CANCEL, rdev, wdev, cookie, chan, 0, gfp); } EXPORT_SYMBOL(cfg80211_tx_mgmt_expired); void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; trace_cfg80211_new_sta(dev, mac_addr, sinfo); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0, rdev, dev, mac_addr, sinfo) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } EXPORT_SYMBOL(cfg80211_new_sta); void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; struct station_info empty_sinfo = {}; if (!sinfo) sinfo = &empty_sinfo; trace_cfg80211_del_sta(dev, mac_addr); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) { cfg80211_sinfo_release_content(sinfo); return; } if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0, rdev, dev, mac_addr, sinfo) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } EXPORT_SYMBOL(cfg80211_del_sta_sinfo); void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, enum nl80211_connect_failed_reason reason, gfp_t gfp) { struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_GOODSIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONN_FAILED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) || 
nla_put_u32(msg, NL80211_ATTR_CONN_FAILED_REASON, reason)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_conn_failed); static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid); if (!nlportid) return false; msg = nlmsg_new(100, gfp); if (!msg) return true; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return true; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); return true; nla_put_failure: nlmsg_free(msg); return true; } bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; bool ret; trace_cfg80211_rx_spurious_frame(dev, addr); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO)) { trace_cfg80211_return_bool(false); return false; } ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, addr, gfp); trace_cfg80211_return_bool(ret); return ret; } EXPORT_SYMBOL(cfg80211_rx_spurious_frame); bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; bool ret; trace_cfg80211_rx_unexpected_4addr_frame(dev, addr); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO && wdev->iftype != NL80211_IFTYPE_AP_VLAN)) { trace_cfg80211_return_bool(false); return false; } ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_4ADDR_FRAME, addr, gfp); trace_cfg80211_return_bool(ret); return ret; } EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u32 nlportid, struct cfg80211_rx_info *info, gfp_t gfp) { struct net_device *netdev = wdev->netdev; struct sk_buff *msg; void *hdr; msg = nlmsg_new(100 + info->len, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); if (!hdr) { nlmsg_free(msg); return -ENOMEM; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || (info->have_link_id && nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, info->link_id)) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, KHZ_TO_MHZ(info->freq)) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ_OFFSET, info->freq % 1000) || (info->sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, info->sig_dbm)) || nla_put(msg, NL80211_ATTR_FRAME, info->len, info->buf) || (info->flags && nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, info->flags)) || (info->rx_tstamp && nla_put_u64_64bit(msg, NL80211_ATTR_RX_HW_TIMESTAMP, info->rx_tstamp, NL80211_ATTR_PAD)) || (info->ack_tstamp && nla_put_u64_64bit(msg, NL80211_ATTR_TX_HW_TIMESTAMP, info->ack_tstamp, NL80211_ATTR_PAD))) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); nla_put_failure: 
nlmsg_free(msg); return -ENOBUFS; } static void nl80211_frame_tx_status(struct wireless_dev *wdev, struct cfg80211_tx_status *status, gfp_t gfp, enum nl80211_commands command) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct net_device *netdev = wdev->netdev; struct sk_buff *msg; void *hdr; if (command == NL80211_CMD_FRAME_TX_STATUS) trace_cfg80211_mgmt_tx_status(wdev, status->cookie, status->ack); else trace_cfg80211_control_port_tx_status(wdev, status->cookie, status->ack); msg = nlmsg_new(100 + status->len, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, command); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_FRAME, status->len, status->buf) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, status->cookie, NL80211_ATTR_PAD) || (status->ack && nla_put_flag(msg, NL80211_ATTR_ACK)) || (status->tx_tstamp && nla_put_u64_64bit(msg, NL80211_ATTR_TX_HW_TIMESTAMP, status->tx_tstamp, NL80211_ATTR_PAD)) || (status->ack_tstamp && nla_put_u64_64bit(msg, NL80211_ATTR_RX_HW_TIMESTAMP, status->ack_tstamp, NL80211_ATTR_PAD))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp) { struct cfg80211_tx_status status = { .cookie = cookie, .buf = buf, .len = len, .ack = ack }; nl80211_frame_tx_status(wdev, &status, gfp, NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS); } EXPORT_SYMBOL(cfg80211_control_port_tx_status); void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev, struct cfg80211_tx_status *status, gfp_t gfp) { nl80211_frame_tx_status(wdev, status, gfp, NL80211_CMD_FRAME_TX_STATUS); } EXPORT_SYMBOL(cfg80211_mgmt_tx_status_ext); static int __nl80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted, int link_id, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ethhdr *ehdr = eth_hdr(skb); const u8 *addr = ehdr->h_source; u16 proto = be16_to_cpu(skb->protocol); struct sk_buff *msg; void *hdr; struct nlattr *frame; u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); if (!nlportid) return -ENOENT; msg = nlmsg_new(100 + skb->len, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONTROL_PORT_FRAME); if (!hdr) { nlmsg_free(msg); return -ENOBUFS; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || (link_id >= 0 && nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) || (unencrypted && nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) goto nla_put_failure; frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len); if (!frame) goto nla_put_failure; skb_copy_bits(skb, 0, nla_data(frame), skb->len); genlmsg_end(msg, hdr); return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } bool 
cfg80211_rx_control_port(struct net_device *dev, struct sk_buff *skb, bool unencrypted, int link_id) { int ret; trace_cfg80211_rx_control_port(dev, skb, unencrypted, link_id); ret = __nl80211_rx_control_port(dev, skb, unencrypted, link_id, GFP_ATOMIC); trace_cfg80211_return_bool(ret == 0); return ret == 0; } EXPORT_SYMBOL(cfg80211_rx_control_port); static struct sk_buff *cfg80211_prepare_cqm(struct net_device *dev, const char *mac, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); void **cb; if (!msg) return NULL; cb = (void **)msg->cb; cb[0] = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM); if (!cb[0]) { nlmsg_free(msg); return NULL; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; if (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) goto nla_put_failure; cb[1] = nla_nest_start_noflag(msg, NL80211_ATTR_CQM); if (!cb[1]) goto nla_put_failure; cb[2] = rdev; return msg; nla_put_failure: nlmsg_free(msg); return NULL; } static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp) { void **cb = (void **)msg->cb; struct cfg80211_registered_device *rdev = cb[2]; nla_nest_end(msg, cb[1]); genlmsg_end(msg, cb[0]); memset(msg->cb, 0, sizeof(msg->cb)); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); } void cfg80211_cqm_rssi_notify(struct net_device *dev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level, gfp_t gfp) { struct sk_buff *msg; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level); if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW && rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH)) return; if (wdev->cqm_config) { wdev->cqm_config->last_rssi_event_value = rssi_level; cfg80211_cqm_rssi_update(rdev, dev); if (rssi_level == 0) rssi_level = wdev->cqm_config->last_rssi_event_value; } msg = cfg80211_prepare_cqm(dev, NULL, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, rssi_event)) goto nla_put_failure; if (rssi_level && nla_put_s32(msg, NL80211_ATTR_CQM_RSSI_LEVEL, rssi_level)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_rssi_notify); void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp) { struct sk_buff *msg; msg = cfg80211_prepare_cqm(dev, peer, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_txe_notify); void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp) { struct sk_buff *msg; trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets); msg = cfg80211_prepare_cqm(dev, peer, gfp); if (!msg) return; if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify); void 
cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp) { struct sk_buff *msg; msg = cfg80211_prepare_cqm(dev, NULL, gfp); if (!msg) return; if (nla_put_flag(msg, NL80211_ATTR_CQM_BEACON_LOSS_EVENT)) goto nla_put_failure; cfg80211_send_cqm(msg, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_cqm_beacon_loss_notify); static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct sk_buff *msg; struct nlattr *rekey_attr; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) goto nla_put_failure; rekey_attr = nla_nest_start_noflag(msg, NL80211_ATTR_REKEY_DATA); if (!rekey_attr) goto nla_put_failure; if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, NL80211_REPLAY_CTR_LEN, replay_ctr)) goto nla_put_failure; nla_nest_end(msg, rekey_attr); genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_gtk_rekey_notify(dev, bssid); nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL(cfg80211_gtk_rekey_notify); static void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp) { struct sk_buff *msg; struct nlattr *attr; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; attr = nla_nest_start_noflag(msg, NL80211_ATTR_PMKSA_CANDIDATE); if (!attr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) || nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) || (preauth && nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH))) goto nla_put_failure; nla_nest_end(msg, attr); genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, const u8 *bssid, bool preauth, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth); nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, unsigned int link_id, struct cfg80211_chan_def *chandef, gfp_t gfp, enum nl80211_commands notif, u8 count, bool quiet, u16 punct_bitmap) { struct wireless_dev *wdev = netdev->ieee80211_ptr; struct sk_buff *msg; void *hdr; msg = 
nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, notif); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; if (wdev->valid_links && nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) goto nla_put_failure; if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; if (notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) { if (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count)) goto nla_put_failure; if (quiet && nla_put_flag(msg, NL80211_ATTR_CH_SWITCH_BLOCK_TX)) goto nla_put_failure; } if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, unsigned int link_id, u16 punct_bitmap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_WDEV_LOCK(wdev); WARN_INVALID_LINK_ID(wdev, link_id); trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!WARN_ON(!wdev->links[link_id].client.current_bss)) cfg80211_update_assoc_bss_entry(wdev, link_id, chandef->chan); break; case NL80211_IFTYPE_MESH_POINT: wdev->u.mesh.chandef = *chandef; wdev->u.mesh.preset_chandef = *chandef; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: wdev->links[link_id].ap.chandef = *chandef; break; case NL80211_IFTYPE_ADHOC: wdev->u.ibss.chandef = *chandef; break; default: WARN_ON(1); break; } cfg80211_sched_dfs_chan_update(rdev); nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_NOTIFY, 0, false, punct_bitmap); } EXPORT_SYMBOL(cfg80211_ch_switch_notify); void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, unsigned int link_id, u8 count, bool quiet, u16 punct_bitmap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); ASSERT_WDEV_LOCK(wdev); WARN_INVALID_LINK_ID(wdev, link_id); trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id, punct_bitmap); nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL, NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count, quiet, punct_bitmap); } EXPORT_SYMBOL(cfg80211_ch_switch_started_notify); int cfg80211_bss_color_notify(struct net_device *dev, enum nl80211_commands cmd, u8 count, u64 color_bitmap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; ASSERT_WDEV_LOCK(wdev); trace_cfg80211_bss_color_notify(dev, cmd, count, color_bitmap); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; if (cmd == NL80211_CMD_COLOR_CHANGE_STARTED && nla_put_u32(msg, NL80211_ATTR_COLOR_CHANGE_COUNT, count)) goto nla_put_failure; if (cmd == NL80211_CMD_OBSS_COLOR_COLLISION && nla_put_u64_64bit(msg, NL80211_ATTR_OBSS_COLOR_BITMAP, color_bitmap, NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); return 
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); nla_put_failure: nlmsg_free(msg); return -EINVAL; } EXPORT_SYMBOL(cfg80211_bss_color_notify); void nl80211_radar_notify(struct cfg80211_registered_device *rdev, const struct cfg80211_chan_def *chandef, enum nl80211_radar_event event, struct net_device *netdev, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_RADAR_DETECT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; /* NOP and radar events don't need a netdev parameter */ if (netdev) { struct wireless_dev *wdev = netdev->ieee80211_ptr; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; } if (nla_put_u32(msg, NL80211_ATTR_RADAR_EVENT, event)) goto nla_put_failure; if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac, struct sta_opmode_info *sta_opmode, gfp_t gfp) { struct sk_buff *msg; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); void *hdr; if (WARN_ON(!mac)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STA_OPMODE_CHANGED); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx)) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_SMPS_MODE_CHANGED) && nla_put_u8(msg, NL80211_ATTR_SMPS_MODE, sta_opmode->smps_mode)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) && nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw)) goto nla_put_failure; if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) && nla_put_u8(msg, NL80211_ATTR_NSS, sta_opmode->rx_nss)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_sta_opmode_change_notify); void cfg80211_probe_status(struct net_device *dev, const u8 *addr, u64 cookie, bool acked, s32 ack_signal, bool is_valid_ack_signal, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_probe_status(dev, addr, cookie, acked); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie, NL80211_ATTR_PAD) || (acked && nla_put_flag(msg, NL80211_ATTR_ACK)) || (is_valid_ack_signal && nla_put_s32(msg, NL80211_ATTR_ACK_SIGNAL, ack_signal))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, 
wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_probe_status); void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; struct cfg80211_beacon_registration *reg; trace_cfg80211_report_obss_beacon(wiphy, frame, len, freq, sig_dbm); spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry(reg, &rdev->beacon_registrations, list) { msg = nlmsg_new(len + 100, GFP_ATOMIC); if (!msg) { spin_unlock_bh(&rdev->beacon_registrations_lock); return; } hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || (freq && (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, KHZ_TO_MHZ(freq)) || nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ_OFFSET, freq % 1000))) || (sig_dbm && nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || nla_put(msg, NL80211_ATTR_FRAME, len, frame)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, reg->nlportid); } spin_unlock_bh(&rdev->beacon_registrations_lock); return; nla_put_failure: spin_unlock_bh(&rdev->beacon_registrations_lock); nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_report_obss_beacon_khz); #ifdef CONFIG_PM static int cfg80211_net_detect_results(struct sk_buff *msg, struct cfg80211_wowlan_wakeup *wakeup) { struct cfg80211_wowlan_nd_info *nd = wakeup->net_detect; struct nlattr *nl_results, *nl_match, *nl_freqs; int i, j; nl_results = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS); if (!nl_results) return -EMSGSIZE; for (i = 0; i < nd->n_matches; i++) { struct cfg80211_wowlan_nd_match *match = nd->matches[i]; nl_match = nla_nest_start_noflag(msg, i); if (!nl_match) break; /* The SSID attribute is optional in nl80211, but for * simplicity reasons it's always present in the * cfg80211 structure. If a driver can't pass the * SSID, that needs to be changed. A zero length SSID * is still a valid SSID (wildcard), so it cannot be * used for this purpose. 
*/ if (nla_put(msg, NL80211_ATTR_SSID, match->ssid.ssid_len, match->ssid.ssid)) { nla_nest_cancel(msg, nl_match); goto out; } if (match->n_channels) { nl_freqs = nla_nest_start_noflag(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nl_freqs) { nla_nest_cancel(msg, nl_match); goto out; } for (j = 0; j < match->n_channels; j++) { if (nla_put_u32(msg, j, match->channels[j])) { nla_nest_cancel(msg, nl_freqs); nla_nest_cancel(msg, nl_match); goto out; } } nla_nest_end(msg, nl_freqs); } nla_nest_end(msg, nl_match); } out: nla_nest_end(msg, nl_results); return 0; } void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; int size = 200; trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup); if (wakeup) size += wakeup->packet_present_len; msg = nlmsg_new(size, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_WOWLAN); if (!hdr) goto free_msg; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free_msg; if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex)) goto free_msg; if (wakeup) { struct nlattr *reasons; reasons = nla_nest_start_noflag(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!reasons) goto free_msg; if (wakeup->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) goto free_msg; if (wakeup->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) goto free_msg; if (wakeup->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) goto free_msg; if (wakeup->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) goto free_msg; if (wakeup->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) goto free_msg; if (wakeup->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)) goto free_msg; if (wakeup->pattern_idx >= 0 && nla_put_u32(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, wakeup->pattern_idx)) goto free_msg; if (wakeup->tcp_match && nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH)) goto free_msg; if (wakeup->tcp_connlost && nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST)) goto free_msg; if (wakeup->tcp_nomoretokens && nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS)) goto free_msg; if (wakeup->packet) { u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211; u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN; if (!wakeup->packet_80211) { pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023; len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023_LEN; } if (wakeup->packet_len && nla_put_u32(msg, len_attr, wakeup->packet_len)) goto free_msg; if (nla_put(msg, pkt_attr, wakeup->packet_present_len, wakeup->packet)) goto free_msg; } if (wakeup->net_detect && cfg80211_net_detect_results(msg, wakeup)) goto free_msg; nla_nest_end(msg, reasons); } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; free_msg: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_report_wowlan_wakeup); #endif void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, 
oper, reason_code); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_TDLS_OPER); if (!hdr) { nlmsg_free(msg); return; } if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u8(msg, NL80211_ATTR_TDLS_OPERATION, oper) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) || (reason_code > 0 && nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_tdls_oper_request); static int nl80211_netlink_notify(struct notifier_block * nb, unsigned long state, void *_notify) { struct netlink_notify *notify = _notify; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; struct cfg80211_beacon_registration *reg, *tmp; if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC) return NOTIFY_DONE; rcu_read_lock(); list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { struct cfg80211_sched_scan_request *sched_scan_req; list_for_each_entry_rcu(sched_scan_req, &rdev->sched_scan_req_list, list) { if (sched_scan_req->owner_nlportid == notify->portid) { sched_scan_req->nl_owner_dead = true; wiphy_work_queue(&rdev->wiphy, &rdev->sched_scan_stop_wk); } } list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { cfg80211_mlme_unregister_socket(wdev, notify->portid); if (wdev->owner_nlportid == notify->portid) { wdev->nl_owner_dead = true; schedule_work(&rdev->destroy_work); } else if (wdev->conn_owner_nlportid == notify->portid) { schedule_work(&wdev->disconnect_wk); } cfg80211_release_pmsr(wdev, notify->portid); } spin_lock_bh(&rdev->beacon_registrations_lock); list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations, list) { if (reg->nlportid == notify->portid) { list_del(&reg->list); kfree(reg); break; } } spin_unlock_bh(&rdev->beacon_registrations_lock); } rcu_read_unlock(); /* * It is possible that the user space process that is controlling the * indoor setting disappeared, so notify the regulatory core. 
*/ regulatory_netlink_notify(notify->portid); return NOTIFY_OK; } static struct notifier_block nl80211_netlink_notifier = { .notifier_call = nl80211_netlink_notify, }; void cfg80211_ft_event(struct net_device *netdev, struct cfg80211_ft_event_params *ft_event) { struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_ft_event(wiphy, netdev, ft_event); if (!ft_event->target_ap) return; msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT); if (!hdr) goto out; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap)) goto out; if (ft_event->ies && nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies)) goto out; if (ft_event->ric_ies && nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len, ft_event->ric_ies)) goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; out: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_ft_event); void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp) { struct cfg80211_registered_device *rdev; struct sk_buff *msg; void *hdr; u32 nlportid; rdev = wiphy_to_rdev(wdev->wiphy); if (!rdev->crit_proto_nlportid) return; nlportid = rdev->crit_proto_nlportid; rdev->crit_proto_nlportid = 0; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CRIT_PROTOCOL_STOP); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); return; nla_put_failure: nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_crit_proto_stopped); void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP); if (!hdr) goto out; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD) || (wdev->valid_links && nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id))) goto out; genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0, NL80211_MCGRP_MLME, GFP_KERNEL); return; out: nlmsg_free(msg); } int cfg80211_external_auth_request(struct net_device *dev, struct cfg80211_external_auth_params *params, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; if (!wdev->conn_owner_nlportid) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EXTERNAL_AUTH); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) || nla_put_u32(msg, 
NL80211_ATTR_EXTERNAL_AUTH_ACTION, params->action) || nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) || nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len, params->ssid.ssid) || (!is_zero_ether_addr(params->mld_addr) && nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN, params->mld_addr))) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, wdev->conn_owner_nlportid); return 0; nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(cfg80211_external_auth_request); void cfg80211_update_owe_info_event(struct net_device *netdev, struct cfg80211_update_owe_info *owe_info, gfp_t gfp) { struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct sk_buff *msg; void *hdr; trace_cfg80211_update_owe_info_event(wiphy, netdev, owe_info); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_UPDATE_OWE_INFO); if (!hdr) goto nla_put_failure; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer)) goto nla_put_failure; if (!owe_info->ie_len || nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie)) goto nla_put_failure; if (owe_info->assoc_link_id != -1) { if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, owe_info->assoc_link_id)) goto nla_put_failure; if (!is_zero_ether_addr(owe_info->peer_mld_addr) && nla_put(msg, NL80211_ATTR_MLD_ADDR, ETH_ALEN, owe_info->peer_mld_addr)) goto nla_put_failure; } genlmsg_end(msg, hdr); genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, NL80211_MCGRP_MLME, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } EXPORT_SYMBOL(cfg80211_update_owe_info_event); /* initialisation/exit functions */ int __init nl80211_init(void) { int err; err = genl_register_family(&nl80211_fam); if (err) return err; err = netlink_register_notifier(&nl80211_netlink_notifier); if (err) goto err_out; return 0; err_out: genl_unregister_family(&nl80211_fam); return err; } void nl80211_exit(void) { netlink_unregister_notifier(&nl80211_netlink_notifier); genl_unregister_family(&nl80211_fam); }
linux-master
net/wireless/nl80211.c
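The notification helpers in nl80211.c above publish their events to generic-netlink multicast groups (scan, regulatory, mlme). As a rough illustration of the receiving side — not part of the kernel tree, and assuming the libnl-3 API plus the "nl80211" family and "mlme" group names used by tools such as iw (build with `pkg-config --cflags --libs libnl-genl-3.0`) — a minimal userspace listener for the MLME events could look like this:

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* print the command and ifindex of every nl80211 MLME multicast event */
static int event_cb(struct nl_msg *msg, void *arg)
{
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	struct nlattr *tb[NL80211_ATTR_MAX + 1];

	nla_parse(tb, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);

	if (tb[NL80211_ATTR_IFINDEX])
		printf("nl80211 cmd %u, ifindex %u\n", gnlh->cmd,
		       nla_get_u32(tb[NL80211_ATTR_IFINDEX]));
	else
		printf("nl80211 cmd %u\n", gnlh->cmd);

	return NL_SKIP;
}

int main(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	int grp;

	if (!sock || genl_connect(sock))
		return 1;

	/* "mlme" is the group name behind NL80211_MCGRP_MLME */
	grp = genl_ctrl_resolve_grp(sock, "nl80211", "mlme");
	if (grp < 0 || nl_socket_add_membership(sock, grp))
		return 1;

	/* multicast notifications carry no usable sequence numbers */
	nl_socket_disable_seq_check(sock);
	nl_socket_modify_cb(sock, NL_CB_VALID, NL_CB_CUSTOM, event_cb, NULL);

	for (;;)
		nl_recvmsgs_default(sock);
}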
// SPDX-License-Identifier: GPL-2.0-only /* * lib80211 crypt: host-based CCMP encryption implementation for lib80211 * * Copyright (c) 2003-2004, Jouni Malinen <[email protected]> * Copyright (c) 2008, John W. Linville <[email protected]> */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <asm/string.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/crypto.h> #include <crypto/aead.h> #include <net/lib80211.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: CCMP"); MODULE_LICENSE("GPL"); #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct lib80211_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_aead *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_aad[2 * AES_BLOCK_LEN]; u8 rx_aad[2 * AES_BLOCK_LEN]; }; static void *lib80211_ccmp_init(int key_idx) { struct lib80211_ccmp_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { priv->tfm = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tfm) crypto_free_aead(priv->tfm); kfree(priv); } return NULL; } static void lib80211_ccmp_deinit(void *priv) { struct lib80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) crypto_free_aead(_priv->tfm); kfree(priv); } static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr, const u8 *pn, u8 *iv, u8 *aad) { u8 *pos, qc = 0; size_t aad_len; int a4_included, qc_included; a4_included = ieee80211_has_a4(hdr->frame_control); qc_included = ieee80211_is_data_qos(hdr->frame_control); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) & hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC * mode authentication are not allowed to collide, yet both are derived * from the same vector. We only set L := 1 here to indicate that the * data size can be represented in (L+1) bytes. The CCM layer will take * care of storing the data length in the top (L+1) bytes and setting * and clearing the other bits as is required to derive the two IVs. */ iv[0] = 0x1; /* Nonce: QC | A2 | PN */ iv[1] = qc; memcpy(iv + 2, hdr->addr2, ETH_ALEN); memcpy(iv + 8, pn, CCMP_PN_LEN); /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = pos[0] & 0x8f; aad[1] = pos[1] & 0xc7; memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN); pos = (u8 *) & hdr->seq_ctrl; aad[20] = pos[0] & 0x0f; aad[21] = 0; /* all bits masked */ memset(aad + 22, 0, 8); if (a4_included) memcpy(aad + 22, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
28 : 22] = qc; /* rest of QC masked */ } return aad_len; } static int lib80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, u8 *aeskey, int keylen, void *priv) { struct lib80211_ccmp_data *key = priv; int i; u8 *pos; if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) return -1; if (aeskey != NULL && keylen >= CCMP_TK_LEN) memcpy(aeskey, key->key, CCMP_TK_LEN); pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; return CCMP_HDR_LEN; } static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_ccmp_data *key = priv; struct ieee80211_hdr *hdr; struct aead_request *req; struct scatterlist sg[2]; u8 *aad = key->tx_aad; u8 iv[AES_BLOCK_LEN]; int len, data_len, aad_len; int ret; if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; len = lib80211_ccmp_hdr(skb, hdr_len, NULL, 0, priv); if (len < 0) return -1; req = aead_request_alloc(key->tfm, GFP_ATOMIC); if (!req) return -ENOMEM; hdr = (struct ieee80211_hdr *)skb->data; aad_len = ccmp_init_iv_and_aad(hdr, key->tx_pn, iv, aad); skb_put(skb, CCMP_MIC_LEN); sg_init_table(sg, 2); sg_set_buf(&sg[0], aad, aad_len); sg_set_buf(&sg[1], skb->data + hdr_len + CCMP_HDR_LEN, data_len + CCMP_MIC_LEN); aead_request_set_callback(req, 0, NULL, NULL); aead_request_set_ad(req, aad_len); aead_request_set_crypt(req, sg, sg, data_len, iv); ret = crypto_aead_encrypt(req); aead_request_free(req); return ret; } /* * deal with seq counter wrapping correctly. 
* refer to timer_after() for jiffies wrapping handling */ static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o) { u32 iv32_n, iv16_n; u32 iv32_o, iv16_o; iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3]; iv16_n = (pn_n[4] << 8) | pn_n[5]; iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3]; iv16_o = (pn_o[4] << 8) | pn_o[5]; if ((s32)iv32_n - (s32)iv32_o < 0 || (iv32_n == iv32_o && iv16_n <= iv16_o)) return 1; return 0; } static int lib80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr *hdr; struct aead_request *req; struct scatterlist sg[2]; u8 *aad = key->rx_aad; u8 iv[AES_BLOCK_LEN]; u8 pn[6]; int aad_len, ret; size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct ieee80211_hdr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n", hdr->addr2); key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { net_dbg_ratelimited("CCMP: RX tkey->key_idx=%d frame keyidx=%d\n", key->key_idx, keyidx); return -6; } if (!key->key_set) { net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n", hdr->addr2, keyidx); return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (ccmp_replay_check(pn, key->rx_pn)) { #ifdef CONFIG_LIB80211_DEBUG net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n", hdr->addr2, key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); #endif key->dot11RSNAStatsCCMPReplays++; return -4; } req = aead_request_alloc(key->tfm, GFP_ATOMIC); if (!req) return -ENOMEM; aad_len = ccmp_init_iv_and_aad(hdr, pn, iv, aad); sg_init_table(sg, 2); sg_set_buf(&sg[0], aad, aad_len); sg_set_buf(&sg[1], pos, data_len); aead_request_set_callback(req, 0, NULL, NULL); aead_request_set_ad(req, aad_len); aead_request_set_crypt(req, sg, sg, data_len, iv); ret = crypto_aead_decrypt(req); aead_request_free(req); if (ret) { net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM (%d)\n", hdr->addr2, ret); key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int lib80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_ccmp_data *data = priv; int keyidx; struct crypto_aead *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } if (crypto_aead_setauthsize(data->tfm, CCMP_MIC_LEN) || crypto_aead_setkey(data->tfm, data->key, CCMP_TK_LEN)) return -1; } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int lib80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_ccmp_data *data = priv; if (len 
< CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static void lib80211_ccmp_print_stats(struct seq_file *m, void *priv) { struct lib80211_ccmp_data *ccmp = priv; seq_printf(m, "key[%d] alg=CCMP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, ccmp->tx_pn[0], ccmp->tx_pn[1], ccmp->tx_pn[2], ccmp->tx_pn[3], ccmp->tx_pn[4], ccmp->tx_pn[5], ccmp->rx_pn[0], ccmp->rx_pn[1], ccmp->rx_pn[2], ccmp->rx_pn[3], ccmp->rx_pn[4], ccmp->rx_pn[5], ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); } static struct lib80211_crypto_ops lib80211_crypt_ccmp = { .name = "CCMP", .init = lib80211_ccmp_init, .deinit = lib80211_ccmp_deinit, .encrypt_mpdu = lib80211_ccmp_encrypt, .decrypt_mpdu = lib80211_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = lib80211_ccmp_set_key, .get_key = lib80211_ccmp_get_key, .print_stats = lib80211_ccmp_print_stats, .extra_mpdu_prefix_len = CCMP_HDR_LEN, .extra_mpdu_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; static int __init lib80211_crypto_ccmp_init(void) { return lib80211_register_crypto_ops(&lib80211_crypt_ccmp); } static void __exit lib80211_crypto_ccmp_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_ccmp); } module_init(lib80211_crypto_ccmp_init); module_exit(lib80211_crypto_ccmp_exit);
linux-master
net/wireless/lib80211_crypt_ccmp.c
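ccmp_replay_check() above compares 48-bit packet numbers by splitting them into a 32-bit and a 16-bit half and using signed subtraction on the high half, so ordering survives counter wrap in the same spirit as the kernel's time_after() helper for jiffies. Below is a stand-alone rendering of that comparison with made-up counter values.

/* Stand-alone illustration of the 48-bit PN replay comparison used above.
 * Returns 1 when pn_n is not strictly newer than pn_o. */
#include <stdio.h>
#include <stdint.h>

static int pn_is_replay(const uint8_t *pn_n, const uint8_t *pn_o)
{
	uint32_t iv32_n = ((uint32_t)pn_n[0] << 24) | (pn_n[1] << 16) |
			  (pn_n[2] << 8) | pn_n[3];
	uint32_t iv16_n = (pn_n[4] << 8) | pn_n[5];
	uint32_t iv32_o = ((uint32_t)pn_o[0] << 24) | (pn_o[1] << 16) |
			  (pn_o[2] << 8) | pn_o[3];
	uint32_t iv16_o = (pn_o[4] << 8) | pn_o[5];

	return (int32_t)iv32_n - (int32_t)iv32_o < 0 ||
	       (iv32_n == iv32_o && iv16_n <= iv16_o);
}

int main(void)
{
	/* hypothetical counters: old PN at the top of the 32-bit half,
	 * new PN just after that half wraps */
	const uint8_t old_pn[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t new_pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("replayed: %d\n", pn_is_replay(new_pn, old_pn)); /* prints 0 */
	return 0;
}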
// SPDX-License-Identifier: GPL-2.0 /* * Portions * Copyright (C) 2022 Intel Corporation */ #include <linux/ieee80211.h> #include <linux/export.h> #include <net/cfg80211.h> #include "nl80211.h" #include "core.h" #include "rdev-ops.h" /* Default values, timeouts in ms */ #define MESH_TTL 31 #define MESH_DEFAULT_ELEMENT_TTL 31 #define MESH_MAX_RETR 3 #define MESH_RET_T 100 #define MESH_CONF_T 100 #define MESH_HOLD_T 100 #define MESH_PATH_TIMEOUT 5000 #define MESH_RANN_INTERVAL 5000 #define MESH_PATH_TO_ROOT_TIMEOUT 6000 #define MESH_ROOT_INTERVAL 5000 #define MESH_ROOT_CONFIRMATION_INTERVAL 2000 #define MESH_DEFAULT_PLINK_TIMEOUT 1800 /* timeout in seconds */ /* * Minimum interval between two consecutive PREQs originated by the same * interface */ #define MESH_PREQ_MIN_INT 10 #define MESH_PERR_MIN_INT 100 #define MESH_DIAM_TRAVERSAL_TIME 50 #define MESH_RSSI_THRESHOLD 0 /* * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds * before timing out. This way it will remain ACTIVE and no data frames * will be unnecessarily held in the pending queue. */ #define MESH_PATH_REFRESH_TIME 1000 #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) /* Default maximum number of established plinks per interface */ #define MESH_MAX_ESTAB_PLINKS 32 #define MESH_MAX_PREQ_RETRIES 4 #define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50 #define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units (=TUs) */ #define MESH_DEFAULT_DTIM_PERIOD 2 #define MESH_DEFAULT_AWAKE_WINDOW 10 /* in 1024 us units (=TUs) */ const struct mesh_config default_mesh_config = { .dot11MeshRetryTimeout = MESH_RET_T, .dot11MeshConfirmTimeout = MESH_CONF_T, .dot11MeshHoldingTimeout = MESH_HOLD_T, .dot11MeshMaxRetries = MESH_MAX_RETR, .dot11MeshTTL = MESH_TTL, .element_ttl = MESH_DEFAULT_ELEMENT_TTL, .auto_open_plinks = true, .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX, .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME, .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES, .path_refresh_time = MESH_PATH_REFRESH_TIME, .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT, .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL, .dot11MeshGateAnnouncementProtocol = false, .dot11MeshForwarding = true, .rssi_threshold = MESH_RSSI_THRESHOLD, .ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED, .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT, .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL, .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL, .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, .dot11MeshNolearn = false, }; const struct mesh_setup default_mesh_setup = { /* cfg80211_join_mesh() will pick a channel if needed */ .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET, .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, .path_metric = IEEE80211_PATH_METRIC_AIRTIME, .auth_id = 0, /* open */ .ie = NULL, .ie_len = 0, .is_secure = false, .user_mpm = false, .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL, .dtim_period = MESH_DEFAULT_DTIM_PERIOD, }; int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_setup *setup, const struct mesh_config *conf) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; 
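	/* Mesh IDs are capped at the same maximum length as SSIDs; the check
	 * below documents and enforces that assumption at compile time. */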
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!(rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) && setup->is_secure) return -EOPNOTSUPP; if (wdev->u.mesh.id_len) return -EALREADY; if (!setup->mesh_id_len) return -EINVAL; if (!rdev->ops->join_mesh) return -EOPNOTSUPP; if (!setup->chandef.chan) { /* if no channel explicitly given, use preset channel */ setup->chandef = wdev->u.mesh.preset_chandef; } if (!setup->chandef.chan) { /* if we don't have that either, use the first usable channel */ enum nl80211_band band; for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; int i; sband = rdev->wiphy.bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR)) continue; setup->chandef.chan = chan; break; } if (setup->chandef.chan) break; } /* no usable channel ... */ if (!setup->chandef.chan) return -EINVAL; setup->chandef.width = NL80211_CHAN_WIDTH_20_NOHT; setup->chandef.center_freq1 = setup->chandef.chan->center_freq; } /* * check if basic rates are available otherwise use mandatory rates as * basic rates */ if (!setup->basic_rates) { enum nl80211_bss_scan_width scan_width; struct ieee80211_supported_band *sband = rdev->wiphy.bands[setup->chandef.chan->band]; if (setup->chandef.chan->band == NL80211_BAND_2GHZ) { int i; /* * Older versions selected the mandatory rates for * 2.4 GHz as well, but were broken in that only * 1 Mbps was regarded as a mandatory rate. Keep * using just 1 Mbps as the default basic rate for * mesh to be interoperable with older versions. */ for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate == 10) { setup->basic_rates = BIT(i); break; } } } else { scan_width = cfg80211_chandef_to_scan_width(&setup->chandef); setup->basic_rates = ieee80211_mandatory_rates(sband, scan_width); } } err = cfg80211_chandef_dfs_required(&rdev->wiphy, &setup->chandef, NL80211_IFTYPE_MESH_POINT); if (err < 0) return err; if (err > 0 && !setup->userspace_handles_dfs) return -EINVAL; if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef, NL80211_IFTYPE_MESH_POINT)) return -EINVAL; err = rdev_join_mesh(rdev, dev, conf, setup); if (!err) { memcpy(wdev->u.mesh.id, setup->mesh_id, setup->mesh_id_len); wdev->u.mesh.id_len = setup->mesh_id_len; wdev->u.mesh.chandef = setup->chandef; wdev->u.mesh.beacon_interval = setup->beacon_interval; } return err; } int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { int err; /* * Workaround for libertas (only!), it puts the interface * into mesh mode but doesn't implement join_mesh. Instead, * it is configured via sysfs and then joins the mesh when * you set the channel. Note that the libertas mesh isn't * compatible with 802.11 mesh. 
*/ if (rdev->ops->libertas_set_mesh_channel) { if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT) return -EINVAL; if (!netif_running(wdev->netdev)) return -ENETDOWN; err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev, chandef->chan); if (!err) wdev->u.mesh.chandef = *chandef; return err; } if (wdev->u.mesh.id_len) return -EBUSY; wdev->u.mesh.preset_chandef = *chandef; return 0; } int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; if (!rdev->ops->leave_mesh) return -EOPNOTSUPP; if (!wdev->u.mesh.id_len) return -ENOTCONN; err = rdev_leave_mesh(rdev, dev); if (!err) { wdev->conn_owner_nlportid = 0; wdev->u.mesh.id_len = 0; wdev->u.mesh.beacon_interval = 0; memset(&wdev->u.mesh.chandef, 0, sizeof(wdev->u.mesh.chandef)); rdev_set_qos_map(rdev, dev, NULL); cfg80211_sched_dfs_chan_update(rdev); } return err; } int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_leave_mesh(rdev, dev); wdev_unlock(wdev); return err; }
linux-master
net/wireless/mesh.c
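When no basic rates are given, __cfg80211_join_mesh() above keeps 2.4 GHz meshes interoperable with older code by selecting only 1 Mbps, found by scanning the band's rate table for a bitrate field of 10 (cfg80211 stores rates in units of 100 kbps). A stand-alone sketch of that lookup over a made-up rate table:

/* Illustration only: find the 1 Mbps entry in a hypothetical 2.4 GHz rate
 * table and build a one-bit basic-rate mask, as the mesh join code does. */
#include <stdio.h>
#include <stddef.h>

struct rate { int bitrate; /* units of 100 kbps, as in ieee80211_rate */ };

int main(void)
{
	const struct rate rates[] = { { 10 }, { 20 }, { 55 }, { 110 }, { 60 } };
	unsigned int basic_rates = 0;
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (rates[i].bitrate == 10) {	/* 10 * 100 kbps == 1 Mbps */
			basic_rates = 1u << i;
			break;
		}
	}
	printf("basic_rates mask: 0x%x\n", basic_rates);
	return 0;
}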
// SPDX-License-Identifier: GPL-2.0-only /* * lib80211 crypt: host-based TKIP encryption implementation for lib80211 * * Copyright (c) 2003-2004, Jouni Malinen <[email protected]> * Copyright (c) 2008, John W. Linville <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/fips.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/scatterlist.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/mm.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <asm/string.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include <crypto/arc4.h> #include <crypto/hash.h> #include <linux/crypto.h> #include <linux/crc32.h> #include <net/lib80211.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("lib80211 crypt: TKIP"); MODULE_LICENSE("GPL"); #define TKIP_HDR_LEN 8 struct lib80211_tkip_data { #define TKIP_KEY_LEN 32 u8 key[TKIP_KEY_LEN]; int key_set; u32 tx_iv32; u16 tx_iv16; u16 tx_ttak[5]; int tx_phase1_done; u32 rx_iv32; u16 rx_iv16; u16 rx_ttak[5]; int rx_phase1_done; u32 rx_iv32_new; u16 rx_iv16_new; u32 dot11RSNAStatsTKIPReplays; u32 dot11RSNAStatsTKIPICVErrors; u32 dot11RSNAStatsTKIPLocalMICFailures; int key_idx; struct arc4_ctx rx_ctx_arc4; struct arc4_ctx tx_ctx_arc4; struct crypto_shash *rx_tfm_michael; struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; unsigned long flags; }; static unsigned long lib80211_tkip_set_flags(unsigned long flags, void *priv) { struct lib80211_tkip_data *_priv = priv; unsigned long old_flags = _priv->flags; _priv->flags = flags; return old_flags; } static unsigned long lib80211_tkip_get_flags(void *priv) { struct lib80211_tkip_data *_priv = priv; return _priv->flags; } static void *lib80211_tkip_init(int key_idx) { struct lib80211_tkip_data *priv; if (fips_enabled) return NULL; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_michael)) { priv->tx_tfm_michael = NULL; goto fail; } priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_michael)) { priv->rx_tfm_michael = NULL; goto fail; } return priv; fail: if (priv) { crypto_free_shash(priv->tx_tfm_michael); crypto_free_shash(priv->rx_tfm_michael); kfree(priv); } return NULL; } static void lib80211_tkip_deinit(void *priv) { struct lib80211_tkip_data *_priv = priv; if (_priv) { crypto_free_shash(_priv->tx_tfm_michael); crypto_free_shash(_priv->rx_tfm_michael); } kfree_sensitive(priv); } static inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } static inline u8 Lo8(u16 val) { return val & 0xff; } static inline u8 Hi8(u16 val) { return val >> 8; } static inline u16 Lo16(u32 val) { return val & 0xffff; } static inline u16 Hi16(u32 val) { return val >> 16; } static inline u16 Mk16(u8 hi, u8 lo) { return lo | (((u16) hi) << 8); } static inline u16 Mk16_le(__le16 * v) { return le16_to_cpu(*v); } static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 
0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } #define PHASE1_LOOP_COUNT 8 static void tkip_mixing_phase1(u16 * TTAK, const u8 * TK, const u8 * TA, u32 IV32) { int i, j; /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ TTAK[0] = Lo16(IV32); TTAK[1] = Hi16(IV32); TTAK[2] = Mk16(TA[1], TA[0]); TTAK[3] = Mk16(TA[3], TA[2]); TTAK[4] = Mk16(TA[5], TA[4]); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; } } static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, u16 IV16) { /* Make temporary area overlap WEP seed so that the final copy can be * avoided on little endian hosts. 
*/ u16 *PPK = (u16 *) & WEPSeed[4]; /* Step 1 - make copy of TTAK and bring in TSC */ PPK[0] = TTAK[0]; PPK[1] = TTAK[1]; PPK[2] = TTAK[2]; PPK[3] = TTAK[3]; PPK[4] = TTAK[4]; PPK[5] = TTAK[4] + IV16; /* Step 2 - 96-bit bijective mixing using S-box */ PPK[0] += _S_(PPK[5] ^ Mk16_le((__le16 *) & TK[0])); PPK[1] += _S_(PPK[0] ^ Mk16_le((__le16 *) & TK[2])); PPK[2] += _S_(PPK[1] ^ Mk16_le((__le16 *) & TK[4])); PPK[3] += _S_(PPK[2] ^ Mk16_le((__le16 *) & TK[6])); PPK[4] += _S_(PPK[3] ^ Mk16_le((__le16 *) & TK[8])); PPK[5] += _S_(PPK[4] ^ Mk16_le((__le16 *) & TK[10])); PPK[0] += RotR1(PPK[5] ^ Mk16_le((__le16 *) & TK[12])); PPK[1] += RotR1(PPK[0] ^ Mk16_le((__le16 *) & TK[14])); PPK[2] += RotR1(PPK[1]); PPK[3] += RotR1(PPK[2]); PPK[4] += RotR1(PPK[3]); PPK[5] += RotR1(PPK[4]); /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value * WEPSeed[0..2] is transmitted as WEP IV */ WEPSeed[0] = Hi8(IV16); WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; WEPSeed[2] = Lo8(IV16); WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((__le16 *) & TK[0])) >> 1); #ifdef __BIG_ENDIAN { int i; for (i = 0; i < 6; i++) PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); } #endif } static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len, u8 * rc4key, int keylen, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 *pos; struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)skb->data; if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len) return -1; if (rc4key == NULL || keylen < 16) return -1; if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); pos = skb_push(skb, TKIP_HDR_LEN); memmove(pos, pos + TKIP_HDR_LEN, hdr_len); pos += hdr_len; *pos++ = *rc4key; *pos++ = *(rc4key + 1); *pos++ = *(rc4key + 2); *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; *pos++ = (tkey->tx_iv32 >> 16) & 0xff; *pos++ = (tkey->tx_iv32 >> 24) & 0xff; tkey->tx_iv16++; if (tkey->tx_iv16 == 0) { tkey->tx_phase1_done = 0; tkey->tx_iv32++; } return TKIP_HDR_LEN; } static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; int len; u8 rc4key[16], *pos, *icv; u32 crc; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n", hdr->addr1); return -1; } if (skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb->data + hdr_len; if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) return -1; crc = ~crc32_le(~0, pos, len); icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16); arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4); return 0; } /* * deal with seq counter wrapping correctly. 
* refer to timer_after() for jiffies wrapping handling */ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, u32 iv32_o, u16 iv16_o) { if ((s32)iv32_n - (s32)iv32_o < 0 || (iv32_n == iv32_o && iv16_n <= iv16_o)) return 1; return 0; } static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 rc4key[16]; u8 keyidx, *pos; u32 iv32; u16 iv16; struct ieee80211_hdr *hdr; u8 icv[4]; u32 crc; int plen; hdr = (struct ieee80211_hdr *)skb->data; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n", hdr->addr2); return -1; } if (skb->len < hdr_len + TKIP_HDR_LEN + 4) return -1; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n", hdr->addr2); return -2; } keyidx >>= 6; if (tkey->key_idx != keyidx) { net_dbg_ratelimited("TKIP: RX tkey->key_idx=%d frame keyidx=%d\n", tkey->key_idx, keyidx); return -6; } if (!tkey->key_set) { net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n", hdr->addr2, keyidx); return -3; } iv16 = (pos[0] << 8) | pos[2]; iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); pos += TKIP_HDR_LEN; if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { #ifdef CONFIG_LIB80211_DEBUG net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n", hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); #endif tkey->dot11RSNAStatsTKIPReplays++; return -4; } if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) { tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32); tkey->rx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16); plen = skb->len - hdr_len - 12; arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16); arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; if (memcmp(icv, pos + plen, 4) != 0) { if (iv32 != tkey->rx_iv32) { /* Previously cached Phase1 result was already lost, so * it needs to be recalculated for the next packet. 
*/ tkey->rx_phase1_done = 0; } #ifdef CONFIG_LIB80211_DEBUG net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n", hdr->addr2); #endif tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } /* Update real counters only after Michael MIC verification has * completed */ tkey->rx_iv32_new = iv32; tkey->rx_iv16_new = iv16; /* Remove IV and ICV */ memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, TKIP_HDR_LEN); skb_trim(skb, skb->len - 4); return keyidx; } static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr, u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm_michael); int err; if (tfm_michael == NULL) { pr_warn("%s(): tfm_michael == NULL\n", __func__); return -1; } desc->tfm = tfm_michael; if (crypto_shash_setkey(tfm_michael, key, 8)) return -1; err = crypto_shash_init(desc); if (err) goto out; err = crypto_shash_update(desc, hdr, 16); if (err) goto out; err = crypto_shash_update(desc, data, data_len); if (err) goto out; err = crypto_shash_final(desc, mic); out: shash_desc_zero(desc); return err; } static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) { struct ieee80211_hdr *hdr11; hdr11 = (struct ieee80211_hdr *)skb->data; switch (le16_to_cpu(hdr11->frame_control) & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ break; default: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; } if (ieee80211_is_data_qos(hdr11->frame_control)) { hdr[12] = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(hdr11))) & IEEE80211_QOS_CTL_TID_MASK; } else hdr[12] = 0; /* priority */ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ } static int lib80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 *pos; if (skb_tailroom(skb) < 8 || skb->len < hdr_len) { printk(KERN_DEBUG "Invalid packet for Michael MIC add " "(tailroom=%d hdr_len=%d skb->len=%d)\n", skb_tailroom(skb), hdr_len, skb->len); return -1; } michael_mic_hdr(skb, tkey->tx_hdr); pos = skb_put(skb, 8); if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) return -1; return 0; } static void lib80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr *hdr, int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; /* TODO: needed parameters: count, keyid, key type, TSC */ memset(&ev, 0, sizeof(ev)); ev.flags = keyidx & IW_MICFAILURE_KEY_ID; if (hdr->addr1[0] & 0x01) ev.flags |= IW_MICFAILURE_GROUP; else ev.flags |= IW_MICFAILURE_PAIRWISE; ev.src_addr.sa_family = ARPHRD_ETHER; memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = sizeof(ev); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); } static int lib80211_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) { struct lib80211_tkip_data *tkey = priv; u8 mic[8]; if (!tkey->key_set) return -1; michael_mic_hdr(skb, tkey->rx_hdr); if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, skb->data + hdr_len, skb->len - 8 - 
hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from %pM keyidx=%d\n", skb->dev ? skb->dev->name : "N/A", hdr->addr2, keyidx); if (skb->dev) lib80211_michael_mic_failure(skb->dev, hdr, keyidx); tkey->dot11RSNAStatsTKIPLocalMICFailures++; return -1; } /* Update TSC counters for RX now that the packet verification has * completed. */ tkey->rx_iv32 = tkey->rx_iv32_new; tkey->rx_iv16 = tkey->rx_iv16_new; skb_trim(skb, skb->len - 8); return 0; } static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_tkip_data *tkey = priv; int keyidx; struct crypto_shash *tfm = tkey->tx_tfm_michael; struct arc4_ctx *tfm2 = &tkey->tx_ctx_arc4; struct crypto_shash *tfm3 = tkey->rx_tfm_michael; struct arc4_ctx *tfm4 = &tkey->rx_ctx_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; tkey->tx_ctx_arc4 = *tfm2; tkey->rx_tfm_michael = tfm3; tkey->rx_ctx_arc4 = *tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); tkey->key_set = 1; tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ if (seq) { tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | (seq[3] << 8) | seq[2]; tkey->rx_iv16 = (seq[1] << 8) | seq[0]; } } else if (len == 0) tkey->key_set = 0; else return -1; return 0; } static int lib80211_tkip_get_key(void *key, int len, u8 * seq, void *priv) { struct lib80211_tkip_data *tkey = priv; if (len < TKIP_KEY_LEN) return -1; if (!tkey->key_set) return 0; memcpy(key, tkey->key, TKIP_KEY_LEN); if (seq) { /* Return the sequence number of the last transmitted frame. */ u16 iv16 = tkey->tx_iv16; u32 iv32 = tkey->tx_iv32; if (iv16 == 0) iv32--; iv16--; seq[0] = tkey->tx_iv16; seq[1] = tkey->tx_iv16 >> 8; seq[2] = tkey->tx_iv32; seq[3] = tkey->tx_iv32 >> 8; seq[4] = tkey->tx_iv32 >> 16; seq[5] = tkey->tx_iv32 >> 24; } return TKIP_KEY_LEN; } static void lib80211_tkip_print_stats(struct seq_file *m, void *priv) { struct lib80211_tkip_data *tkip = priv; seq_printf(m, "key[%d] alg=TKIP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "replays=%d icv_errors=%d local_mic_failures=%d\n", tkip->key_idx, tkip->key_set, (tkip->tx_iv32 >> 24) & 0xff, (tkip->tx_iv32 >> 16) & 0xff, (tkip->tx_iv32 >> 8) & 0xff, tkip->tx_iv32 & 0xff, (tkip->tx_iv16 >> 8) & 0xff, tkip->tx_iv16 & 0xff, (tkip->rx_iv32 >> 24) & 0xff, (tkip->rx_iv32 >> 16) & 0xff, (tkip->rx_iv32 >> 8) & 0xff, tkip->rx_iv32 & 0xff, (tkip->rx_iv16 >> 8) & 0xff, tkip->rx_iv16 & 0xff, tkip->dot11RSNAStatsTKIPReplays, tkip->dot11RSNAStatsTKIPICVErrors, tkip->dot11RSNAStatsTKIPLocalMICFailures); } static struct lib80211_crypto_ops lib80211_crypt_tkip = { .name = "TKIP", .init = lib80211_tkip_init, .deinit = lib80211_tkip_deinit, .encrypt_mpdu = lib80211_tkip_encrypt, .decrypt_mpdu = lib80211_tkip_decrypt, .encrypt_msdu = lib80211_michael_mic_add, .decrypt_msdu = lib80211_michael_mic_verify, .set_key = lib80211_tkip_set_key, .get_key = lib80211_tkip_get_key, .print_stats = lib80211_tkip_print_stats, .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ .extra_mpdu_postfix_len = 4, /* ICV */ .extra_msdu_postfix_len = 8, /* MIC */ .get_flags = lib80211_tkip_get_flags, .set_flags = lib80211_tkip_set_flags, .owner = THIS_MODULE, }; static int __init lib80211_crypto_tkip_init(void) { return lib80211_register_crypto_ops(&lib80211_crypt_tkip); } static void __exit 
lib80211_crypto_tkip_exit(void) { lib80211_unregister_crypto_ops(&lib80211_crypt_tkip); } module_init(lib80211_crypto_tkip_init); module_exit(lib80211_crypto_tkip_exit);
linux-master
net/wireless/lib80211_crypt_tkip.c
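In tkip_mixing_phase2() above, the first three bytes of the per-packet RC4 key double as the WEP IV transmitted on the air, and the middle byte is forced to `(Hi8(IV16) | 0x20) & 0x7f` so that known RC4 weak-key IV patterns are avoided. A tiny stand-alone sketch of just that IV construction (the IV16 value is arbitrary):

/* Illustration only: the 3-byte WEP IV that TKIP places on the air, derived
 * from the low 16 bits of the TSC exactly as in tkip_mixing_phase2() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t iv16 = 0x1234;		/* arbitrary example TSC low half */
	uint8_t hi = iv16 >> 8, lo = iv16 & 0xff;
	uint8_t iv[3];

	iv[0] = hi;
	iv[1] = (hi | 0x20) & 0x7f;	/* dodge known RC4 weak-key IV patterns */
	iv[2] = lo;

	printf("WEP IV on air: %02x %02x %02x\n", iv[0], iv[1], iv[2]);
	return 0;
}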
/* * This file implement the Wireless Extensions proc API. * * Authors : Jean Tourrilhes - HPL - <[email protected]> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * * (As all part of the Linux kernel, this file is GPL) */ /* * The /proc/net/wireless file is a human readable user-space interface * exporting various wireless specific statistics from the wireless devices. * This is the most popular part of the Wireless Extensions ;-) * * This interface is a pure clone of /proc/net/dev (in net/core/dev.c). * The content of the file is basically the content of "struct iw_statistics". */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/wireless.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/iw_handler.h> #include <net/wext.h> static void wireless_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { /* Get stats from the driver */ struct iw_statistics *stats = get_wireless_stats(dev); static struct iw_statistics nullstats = {}; /* show device if it's wireless regardless of current stats */ if (!stats) { #ifdef CONFIG_WIRELESS_EXT if (dev->wireless_handlers) stats = &nullstats; #endif #ifdef CONFIG_CFG80211 if (dev->ieee80211_ptr) stats = &nullstats; #endif } if (stats) { seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d " "%6d %6d %6d\n", dev->name, stats->status, stats->qual.qual, stats->qual.updated & IW_QUAL_QUAL_UPDATED ? '.' : ' ', ((__s32) stats->qual.level) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_LEVEL_UPDATED ? '.' : ' ', ((__s32) stats->qual.noise) - ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0), stats->qual.updated & IW_QUAL_NOISE_UPDATED ? '.' : ' ', stats->discard.nwid, stats->discard.code, stats->discard.fragment, stats->discard.retries, stats->discard.misc, stats->miss.beacon); if (stats != &nullstats) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; } } /* ---------------------------------------------------------------- */ /* * Print info for /proc/net/wireless (print all entries) */ static int wireless_dev_seq_show(struct seq_file *seq, void *v) { might_sleep(); if (v == SEQ_START_TOKEN) seq_printf(seq, "Inter-| sta-| Quality | Discarded " "packets | Missed | WE\n" " face | tus | link level noise | nwid " "crypt frag retry misc | beacon | %d\n", WIRELESS_EXT); else wireless_seq_printf_stats(seq, v); return 0; } static void *wireless_dev_seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); loff_t off; struct net_device *dev; rtnl_lock(); if (!*pos) return SEQ_START_TOKEN; off = 1; for_each_netdev(net, dev) if (off++ == *pos) return dev; return NULL; } static void *wireless_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); ++*pos; return v == SEQ_START_TOKEN ? first_net_device(net) : next_net_device(v); } static void wireless_dev_seq_stop(struct seq_file *seq, void *v) { rtnl_unlock(); } static const struct seq_operations wireless_seq_ops = { .start = wireless_dev_seq_start, .next = wireless_dev_seq_next, .stop = wireless_dev_seq_stop, .show = wireless_dev_seq_show, }; int __net_init wext_proc_init(struct net *net) { /* Create /proc/net/wireless entry */ if (!proc_create_net("wireless", 0444, net->proc_net, &wireless_seq_ops, sizeof(struct seq_net_private))) return -ENOMEM; return 0; } void __net_exit wext_proc_exit(struct net *net) { remove_proc_entry("wireless", net->proc_net); }
linux-master
net/wireless/wext-proc.c
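The seq_file handlers above implement /proc/net/wireless: two fixed header lines followed by one line per wireless interface. Reading it needs nothing beyond standard C, as in this small sketch:

/* Dump /proc/net/wireless verbatim; the file is plain text, so line-based
 * reading is all that is required. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/wireless", "r");

	if (!f) {
		perror("/proc/net/wireless");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}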
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <[email protected]> * Lauro Ramos Venancio <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/tcp_states.h> #include <linux/nfc.h> #include <linux/export.h> #include <linux/kcov.h> #include "nfc.h" static struct nfc_sock_list raw_sk_list = { .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock) }; static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } static void rawsock_write_queue_purge(struct sock *sk) { pr_debug("sk=%p\n", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); } static void rawsock_report_error(struct sock *sk, int err) { pr_debug("sk=%p err=%d\n", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; sk_error_report(sk); rawsock_write_queue_purge(sk); } static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; pr_debug("sock=%p sk=%p\n", sock, sk); if (!sk) return 0; if (sock->type == SOCK_RAW) nfc_sock_unlink(&raw_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; struct nfc_dev *dev; int rc = 0; pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sock->state == SS_CONNECTED) { rc = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (!dev) { rc = -ENODEV; goto error; } if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; goto put_dev; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; nfc_rawsock(sk)->dev = dev; nfc_rawsock(sk)->target_idx = addr->target_idx; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); release_sock(sk); return 0; put_dev: nfc_put_device(dev); error: release_sock(sk); return rc; } static int rawsock_add_header(struct sk_buff *skb) { *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0; return 0; } static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, int err) { struct sock *sk = (struct sock *) context; BUG_ON(in_hardirq()); pr_debug("sk=%p err=%d\n", sk, err); if (err) goto error; err = rawsock_add_header(skb); if (err) goto error_skb; err = sock_queue_rcv_skb(sk, skb); if (err) goto error_skb; spin_lock_bh(&sk->sk_write_queue.lock); if (!skb_queue_empty(&sk->sk_write_queue)) schedule_work(&nfc_rawsock(sk)->tx_work); else nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); sock_put(sk); return; error_skb: kfree_skb(skb); error: rawsock_report_error(sk, err); sock_put(sk); } static void rawsock_tx_work(struct work_struct *work) { struct sock *sk = to_rawsock_sk(work); struct nfc_dev *dev = nfc_rawsock(sk)->dev; u32 target_idx = nfc_rawsock(sk)->target_idx; struct sk_buff *skb; int rc; pr_debug("sk=%p 
target_idx=%u\n", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); return; } skb = skb_dequeue(&sk->sk_write_queue); kcov_remote_start_common(skb_get_kcov_handle(skb)); sock_hold(sk); rc = nfc_data_exchange(dev, target_idx, skb, rawsock_data_exchange_complete, sk); if (rc) { rawsock_report_error(sk, rc); sock_put(sk); } kcov_remote_stop(); } static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; struct sk_buff *skb; int rc; pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; if (sock->state != SS_CONNECTED) return -ENOTCONN; skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); if (skb == NULL) return rc; rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); return rc; } spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_tail(&sk->sk_write_queue, skb); if (!nfc_rawsock(sk)->tx_work_scheduled) { schedule_work(&nfc_rawsock(sk)->tx_work); nfc_rawsock(sk)->tx_work_scheduled = true; } spin_unlock_bh(&sk->sk_write_queue.lock); return len; } static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, &rc); if (!skb) return rc; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); return rc ? : copied; } static const struct proto_ops rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = rawsock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops rawsock_raw_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static void rawsock_destruct(struct sock *sk) { pr_debug("sk=%p\n", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, nfc_rawsock(sk)->target_idx, NFC_TARGET_MODE_IDLE); nfc_put_device(nfc_rawsock(sk)->dev); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC raw socket %p\n", sk); return; } } static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; sock->ops = &rawsock_raw_ops; } else { sock->ops = &rawsock_ops; } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) 
nfc_sock_link(&raw_sk_list, sk); else { INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; } void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, u8 payload_type, u8 direction) { struct sk_buff *skb_copy = NULL, *nskb; struct sock *sk; u8 *data; read_lock(&raw_sk_list.lock); sk_for_each(sk, &raw_sk_list.head) { if (!skb_copy) { skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, GFP_ATOMIC, true); if (!skb_copy) continue; data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE); data[0] = dev ? dev->idx : 0xFF; data[1] = direction & 0x01; data[1] |= (payload_type << 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&raw_sk_list.lock); kfree_skb(skb_copy); } EXPORT_SYMBOL(nfc_send_to_raw_sock); static struct proto rawsock_proto = { .name = "NFC_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_rawsock), }; static const struct nfc_protocol rawsock_nfc_proto = { .id = NFC_SOCKPROTO_RAW, .proto = &rawsock_proto, .owner = THIS_MODULE, .create = rawsock_create }; int __init rawsock_init(void) { int rc; rc = nfc_proto_register(&rawsock_nfc_proto); return rc; } void rawsock_exit(void) { nfc_proto_unregister(&rawsock_nfc_proto); }
linux-master
net/nfc/rawsock.c
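rawsock_connect() above expects a struct sockaddr_nfc naming the adapter index, the discovered target index and the NFC protocol to activate. The user-space sketch below opens such a socket; the indices and the protocol are placeholder assumptions, and without a discovered NFC target the connect() will simply fail.

/* Sketch only: connect an AF_NFC SOCK_SEQPACKET socket to a hypothetical
 * target 0 on NFC device 0. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

int main(void)
{
	struct sockaddr_nfc addr;
	int fd = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);

	if (fd < 0) {
		perror("socket(AF_NFC)");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.dev_idx = 0;			/* assumed adapter index */
	addr.target_idx = 0;			/* assumed target index */
	addr.nfc_protocol = NFC_PROTO_MIFARE;	/* assumed protocol */

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	/* a real client would now send()/recv() raw frames on fd */
	close(fd);
	return 0;
}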
// SPDX-License-Identifier: GPL-2.0-only /* * NFC Digital Protocol stack * Copyright (c) 2013, Intel Corporation. */ #define pr_fmt(fmt) "digital: %s: " fmt, __func__ #include <linux/module.h> #include "digital.h" #define DIGITAL_PROTO_NFCA_RF_TECH \ (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_NFC_DEP_MASK | NFC_PROTO_ISO14443_MASK) #define DIGITAL_PROTO_NFCB_RF_TECH NFC_PROTO_ISO14443_B_MASK #define DIGITAL_PROTO_NFCF_RF_TECH \ (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK) #define DIGITAL_PROTO_ISO15693_RF_TECH NFC_PROTO_ISO15693_MASK /* Delay between each poll frame (ms) */ #define DIGITAL_POLL_INTERVAL 10 struct digital_cmd { struct list_head queue; u8 type; u8 pending; u16 timeout; struct sk_buff *req; struct sk_buff *resp; struct digital_tg_mdaa_params *mdaa_params; nfc_digital_cmd_complete_t cmd_cb; void *cb_context; }; struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev, unsigned int len) { struct sk_buff *skb; skb = alloc_skb(len + ddev->tx_headroom + ddev->tx_tailroom, GFP_KERNEL); if (skb) skb_reserve(skb, ddev->tx_headroom); return skb; } void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init, u8 bitwise_inv, u8 msb_first) { u16 crc; crc = crc_func(init, skb->data, skb->len); if (bitwise_inv) crc = ~crc; if (msb_first) crc = __fswab16(crc); skb_put_u8(skb, crc & 0xFF); skb_put_u8(skb, (crc >> 8) & 0xFF); } int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func, u16 crc_init, u8 bitwise_inv, u8 msb_first) { int rc; u16 crc; if (skb->len <= 2) return -EIO; crc = crc_func(crc_init, skb->data, skb->len - 2); if (bitwise_inv) crc = ~crc; if (msb_first) crc = __swab16(crc); rc = (skb->data[skb->len - 2] - (crc & 0xFF)) + (skb->data[skb->len - 1] - ((crc >> 8) & 0xFF)); if (rc) return -EIO; skb_trim(skb, skb->len - 2); return 0; } static inline void digital_switch_rf(struct nfc_digital_dev *ddev, bool on) { ddev->ops->switch_rf(ddev, on); } static inline void digital_abort_cmd(struct nfc_digital_dev *ddev) { ddev->ops->abort_cmd(ddev); } static void digital_wq_cmd_complete(struct work_struct *work) { struct digital_cmd *cmd; struct nfc_digital_dev *ddev = container_of(work, struct nfc_digital_dev, cmd_complete_work); mutex_lock(&ddev->cmd_lock); cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd, queue); if (!cmd) { mutex_unlock(&ddev->cmd_lock); return; } list_del(&cmd->queue); mutex_unlock(&ddev->cmd_lock); if (!IS_ERR(cmd->resp)) print_hex_dump_debug("DIGITAL RX: ", DUMP_PREFIX_NONE, 16, 1, cmd->resp->data, cmd->resp->len, false); cmd->cmd_cb(ddev, cmd->cb_context, cmd->resp); kfree(cmd->mdaa_params); kfree(cmd); schedule_work(&ddev->cmd_work); } static void digital_send_cmd_complete(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct digital_cmd *cmd = arg; cmd->resp = resp; schedule_work(&ddev->cmd_complete_work); } static void digital_wq_cmd(struct work_struct *work) { int rc; struct digital_cmd *cmd; struct digital_tg_mdaa_params *params; struct nfc_digital_dev *ddev = container_of(work, struct nfc_digital_dev, cmd_work); mutex_lock(&ddev->cmd_lock); cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd, queue); if (!cmd || cmd->pending) { mutex_unlock(&ddev->cmd_lock); return; } cmd->pending = 1; mutex_unlock(&ddev->cmd_lock); if (cmd->req) print_hex_dump_debug("DIGITAL TX: ", DUMP_PREFIX_NONE, 16, 1, cmd->req->data, cmd->req->len, false); switch (cmd->type) { case DIGITAL_CMD_IN_SEND: rc = ddev->ops->in_send_cmd(ddev, cmd->req, cmd->timeout, 
digital_send_cmd_complete, cmd); break; case DIGITAL_CMD_TG_SEND: rc = ddev->ops->tg_send_cmd(ddev, cmd->req, cmd->timeout, digital_send_cmd_complete, cmd); break; case DIGITAL_CMD_TG_LISTEN: rc = ddev->ops->tg_listen(ddev, cmd->timeout, digital_send_cmd_complete, cmd); break; case DIGITAL_CMD_TG_LISTEN_MDAA: params = cmd->mdaa_params; rc = ddev->ops->tg_listen_mdaa(ddev, params, cmd->timeout, digital_send_cmd_complete, cmd); break; case DIGITAL_CMD_TG_LISTEN_MD: rc = ddev->ops->tg_listen_md(ddev, cmd->timeout, digital_send_cmd_complete, cmd); break; default: pr_err("Unknown cmd type %d\n", cmd->type); return; } if (!rc) return; pr_err("in_send_command returned err %d\n", rc); mutex_lock(&ddev->cmd_lock); list_del(&cmd->queue); mutex_unlock(&ddev->cmd_lock); kfree_skb(cmd->req); kfree(cmd->mdaa_params); kfree(cmd); schedule_work(&ddev->cmd_work); } int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type, struct sk_buff *skb, struct digital_tg_mdaa_params *params, u16 timeout, nfc_digital_cmd_complete_t cmd_cb, void *cb_context) { struct digital_cmd *cmd; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->type = cmd_type; cmd->timeout = timeout; cmd->req = skb; cmd->mdaa_params = params; cmd->cmd_cb = cmd_cb; cmd->cb_context = cb_context; INIT_LIST_HEAD(&cmd->queue); mutex_lock(&ddev->cmd_lock); list_add_tail(&cmd->queue, &ddev->cmd_queue); mutex_unlock(&ddev->cmd_lock); schedule_work(&ddev->cmd_work); return 0; } int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { int rc; rc = ddev->ops->in_configure_hw(ddev, type, param); if (rc) pr_err("in_configure_hw failed: %d\n", rc); return rc; } int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { int rc; rc = ddev->ops->tg_configure_hw(ddev, type, param); if (rc) pr_err("tg_configure_hw failed: %d\n", rc); return rc; } static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech) { struct digital_tg_mdaa_params *params; int rc; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; params->sens_res = DIGITAL_SENS_RES_NFC_DEP; get_random_bytes(params->nfcid1, sizeof(params->nfcid1)); params->sel_res = DIGITAL_SEL_RES_NFC_DEP; params->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1; params->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2; get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2); params->sc = DIGITAL_SENSF_FELICA_SC; rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params, 500, digital_tg_recv_atr_req, NULL); if (rc) kfree(params); return rc; } static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech) { return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MD, NULL, NULL, 500, digital_tg_recv_md_req, NULL); } int digital_target_found(struct nfc_digital_dev *ddev, struct nfc_target *target, u8 protocol) { int rc; u8 framing; u8 rf_tech; u8 poll_tech_count; int (*check_crc)(struct sk_buff *skb); void (*add_crc)(struct sk_buff *skb); rf_tech = ddev->poll_techs[ddev->poll_tech_index].rf_tech; switch (protocol) { case NFC_PROTO_JEWEL: framing = NFC_DIGITAL_FRAMING_NFCA_T1T; check_crc = digital_skb_check_crc_b; add_crc = digital_skb_add_crc_b; break; case NFC_PROTO_MIFARE: framing = NFC_DIGITAL_FRAMING_NFCA_T2T; check_crc = digital_skb_check_crc_a; add_crc = digital_skb_add_crc_a; break; case NFC_PROTO_FELICA: framing = NFC_DIGITAL_FRAMING_NFCF_T3T; check_crc = digital_skb_check_crc_f; add_crc = digital_skb_add_crc_f; break; case NFC_PROTO_NFC_DEP: if (rf_tech == NFC_DIGITAL_RF_TECH_106A) { framing 
= NFC_DIGITAL_FRAMING_NFCA_NFC_DEP; check_crc = digital_skb_check_crc_a; add_crc = digital_skb_add_crc_a; } else { framing = NFC_DIGITAL_FRAMING_NFCF_NFC_DEP; check_crc = digital_skb_check_crc_f; add_crc = digital_skb_add_crc_f; } break; case NFC_PROTO_ISO15693: framing = NFC_DIGITAL_FRAMING_ISO15693_T5T; check_crc = digital_skb_check_crc_b; add_crc = digital_skb_add_crc_b; break; case NFC_PROTO_ISO14443: framing = NFC_DIGITAL_FRAMING_NFCA_T4T; check_crc = digital_skb_check_crc_a; add_crc = digital_skb_add_crc_a; break; case NFC_PROTO_ISO14443_B: framing = NFC_DIGITAL_FRAMING_NFCB_T4T; check_crc = digital_skb_check_crc_b; add_crc = digital_skb_add_crc_b; break; default: pr_err("Invalid protocol %d\n", protocol); return -EINVAL; } pr_debug("rf_tech=%d, protocol=%d\n", rf_tech, protocol); ddev->curr_rf_tech = rf_tech; if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) { ddev->skb_add_crc = digital_skb_add_crc_none; ddev->skb_check_crc = digital_skb_check_crc_none; } else { ddev->skb_add_crc = add_crc; ddev->skb_check_crc = check_crc; } rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, framing); if (rc) return rc; target->supported_protocols = (1 << protocol); poll_tech_count = ddev->poll_tech_count; ddev->poll_tech_count = 0; rc = nfc_targets_found(ddev->nfc_dev, target, 1); if (rc) { ddev->poll_tech_count = poll_tech_count; return rc; } return 0; } void digital_poll_next_tech(struct nfc_digital_dev *ddev) { u8 rand_mod; digital_switch_rf(ddev, 0); mutex_lock(&ddev->poll_lock); if (!ddev->poll_tech_count) { mutex_unlock(&ddev->poll_lock); return; } get_random_bytes(&rand_mod, sizeof(rand_mod)); ddev->poll_tech_index = rand_mod % ddev->poll_tech_count; mutex_unlock(&ddev->poll_lock); schedule_delayed_work(&ddev->poll_work, msecs_to_jiffies(DIGITAL_POLL_INTERVAL)); } static void digital_wq_poll(struct work_struct *work) { int rc; struct digital_poll_tech *poll_tech; struct nfc_digital_dev *ddev = container_of(work, struct nfc_digital_dev, poll_work.work); mutex_lock(&ddev->poll_lock); if (!ddev->poll_tech_count) { mutex_unlock(&ddev->poll_lock); return; } poll_tech = &ddev->poll_techs[ddev->poll_tech_index]; mutex_unlock(&ddev->poll_lock); rc = poll_tech->poll_func(ddev, poll_tech->rf_tech); if (rc) digital_poll_next_tech(ddev); } static void digital_add_poll_tech(struct nfc_digital_dev *ddev, u8 rf_tech, digital_poll_t poll_func) { struct digital_poll_tech *poll_tech; if (ddev->poll_tech_count >= NFC_DIGITAL_POLL_MODE_COUNT_MAX) return; poll_tech = &ddev->poll_techs[ddev->poll_tech_count++]; poll_tech->rf_tech = rf_tech; poll_tech->poll_func = poll_func; } /** * digital_start_poll - start_poll operation * @nfc_dev: device to be polled * @im_protocols: bitset of nfc initiator protocols to be used for polling * @tm_protocols: bitset of nfc transport protocols to be used for polling * * For every supported protocol, the corresponding polling function is added * to the table of polling technologies (ddev->poll_techs[]) using * digital_add_poll_tech(). * When a polling function fails (by timeout or protocol error) the next one is * schedule by digital_poll_next_tech() on the poll workqueue (ddev->poll_work). 
*/ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, __u32 tm_protocols) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); u32 matching_im_protocols, matching_tm_protocols; pr_debug("protocols: im 0x%x, tm 0x%x, supported 0x%x\n", im_protocols, tm_protocols, ddev->protocols); matching_im_protocols = ddev->protocols & im_protocols; matching_tm_protocols = ddev->protocols & tm_protocols; if (!matching_im_protocols && !matching_tm_protocols) { pr_err("Unknown protocol\n"); return -EINVAL; } if (ddev->poll_tech_count) { pr_err("Already polling\n"); return -EBUSY; } if (ddev->curr_protocol) { pr_err("A target is already active\n"); return -EBUSY; } ddev->poll_tech_count = 0; ddev->poll_tech_index = 0; if (matching_im_protocols & DIGITAL_PROTO_NFCA_RF_TECH) digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, digital_in_send_sens_req); if (matching_im_protocols & DIGITAL_PROTO_NFCB_RF_TECH) digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106B, digital_in_send_sensb_req); if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) { digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F, digital_in_send_sensf_req); digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F, digital_in_send_sensf_req); } if (matching_im_protocols & DIGITAL_PROTO_ISO15693_RF_TECH) digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_ISO15693, digital_in_send_iso15693_inv_req); if (matching_tm_protocols & NFC_PROTO_NFC_DEP_MASK) { if (ddev->ops->tg_listen_mdaa) { digital_add_poll_tech(ddev, 0, digital_tg_listen_mdaa); } else if (ddev->ops->tg_listen_md) { digital_add_poll_tech(ddev, 0, digital_tg_listen_md); } else { digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A, digital_tg_listen_nfca); digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F, digital_tg_listen_nfcf); digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F, digital_tg_listen_nfcf); } } if (!ddev->poll_tech_count) { pr_err("Unsupported protocols: im=0x%x, tm=0x%x\n", matching_im_protocols, matching_tm_protocols); return -EINVAL; } schedule_delayed_work(&ddev->poll_work, 0); return 0; } static void digital_stop_poll(struct nfc_dev *nfc_dev) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); mutex_lock(&ddev->poll_lock); if (!ddev->poll_tech_count) { pr_err("Polling operation was not running\n"); mutex_unlock(&ddev->poll_lock); return; } ddev->poll_tech_count = 0; mutex_unlock(&ddev->poll_lock); cancel_delayed_work_sync(&ddev->poll_work); digital_abort_cmd(ddev); } static int digital_dev_up(struct nfc_dev *nfc_dev) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); digital_switch_rf(ddev, 1); return 0; } static int digital_dev_down(struct nfc_dev *nfc_dev) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); digital_switch_rf(ddev, 0); return 0; } static int digital_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, __u8 comm_mode, __u8 *gb, size_t gb_len) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); int rc; rc = digital_in_send_atr_req(ddev, target, comm_mode, gb, gb_len); if (!rc) ddev->curr_protocol = NFC_PROTO_NFC_DEP; return rc; } static int digital_dep_link_down(struct nfc_dev *nfc_dev) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); digital_abort_cmd(ddev); ddev->curr_protocol = 0; return 0; } static int digital_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, __u32 protocol) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); if (ddev->poll_tech_count) { pr_err("Can't activate a target while polling\n"); return -EBUSY; } if 
(ddev->curr_protocol) { pr_err("A target is already active\n"); return -EBUSY; } ddev->curr_protocol = protocol; return 0; } static void digital_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); if (!ddev->curr_protocol) { pr_err("No active target\n"); return; } digital_abort_cmd(ddev); ddev->curr_protocol = 0; } static int digital_tg_send(struct nfc_dev *dev, struct sk_buff *skb) { struct nfc_digital_dev *ddev = nfc_get_drvdata(dev); return digital_tg_send_dep_res(ddev, skb); } static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct digital_data_exch *data_exch = arg; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto done; } if (ddev->curr_protocol == NFC_PROTO_MIFARE) { rc = digital_in_recv_mifare_res(resp); /* crc check is done in digital_in_recv_mifare_res() */ goto done; } if ((ddev->curr_protocol == NFC_PROTO_ISO14443) || (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) { rc = digital_in_iso_dep_pull_sod(ddev, resp); if (rc) goto done; } rc = ddev->skb_check_crc(resp); done: if (rc) { kfree_skb(resp); resp = NULL; } data_exch->cb(data_exch->cb_context, resp, rc); kfree(data_exch); } static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev); struct digital_data_exch *data_exch; int rc; data_exch = kzalloc(sizeof(*data_exch), GFP_KERNEL); if (!data_exch) return -ENOMEM; data_exch->cb = cb; data_exch->cb_context = cb_context; if (ddev->curr_protocol == NFC_PROTO_NFC_DEP) { rc = digital_in_send_dep_req(ddev, target, skb, data_exch); goto exit; } if ((ddev->curr_protocol == NFC_PROTO_ISO14443) || (ddev->curr_protocol == NFC_PROTO_ISO14443_B)) { rc = digital_in_iso_dep_push_sod(ddev, skb); if (rc) goto exit; } ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete, data_exch); exit: if (rc) kfree(data_exch); return rc; } static const struct nfc_ops digital_nfc_ops = { .dev_up = digital_dev_up, .dev_down = digital_dev_down, .start_poll = digital_start_poll, .stop_poll = digital_stop_poll, .dep_link_up = digital_dep_link_up, .dep_link_down = digital_dep_link_down, .activate_target = digital_activate_target, .deactivate_target = digital_deactivate_target, .tm_send = digital_tg_send, .im_transceive = digital_in_send, }; struct nfc_digital_dev *nfc_digital_allocate_device(const struct nfc_digital_ops *ops, __u32 supported_protocols, __u32 driver_capabilities, int tx_headroom, int tx_tailroom) { struct nfc_digital_dev *ddev; if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen || !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd || !ops->switch_rf || (ops->tg_listen_md && !ops->tg_get_rf_tech)) return NULL; ddev = kzalloc(sizeof(*ddev), GFP_KERNEL); if (!ddev) return NULL; ddev->driver_capabilities = driver_capabilities; ddev->ops = ops; mutex_init(&ddev->cmd_lock); INIT_LIST_HEAD(&ddev->cmd_queue); INIT_WORK(&ddev->cmd_work, digital_wq_cmd); INIT_WORK(&ddev->cmd_complete_work, digital_wq_cmd_complete); mutex_init(&ddev->poll_lock); INIT_DELAYED_WORK(&ddev->poll_work, digital_wq_poll); if (supported_protocols & NFC_PROTO_JEWEL_MASK) ddev->protocols |= NFC_PROTO_JEWEL_MASK; if (supported_protocols & NFC_PROTO_MIFARE_MASK) ddev->protocols |= NFC_PROTO_MIFARE_MASK; if (supported_protocols & NFC_PROTO_FELICA_MASK) ddev->protocols |= 
NFC_PROTO_FELICA_MASK; if (supported_protocols & NFC_PROTO_NFC_DEP_MASK) ddev->protocols |= NFC_PROTO_NFC_DEP_MASK; if (supported_protocols & NFC_PROTO_ISO15693_MASK) ddev->protocols |= NFC_PROTO_ISO15693_MASK; if (supported_protocols & NFC_PROTO_ISO14443_MASK) ddev->protocols |= NFC_PROTO_ISO14443_MASK; if (supported_protocols & NFC_PROTO_ISO14443_B_MASK) ddev->protocols |= NFC_PROTO_ISO14443_B_MASK; ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN; ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN; ddev->nfc_dev = nfc_allocate_device(&digital_nfc_ops, ddev->protocols, ddev->tx_headroom, ddev->tx_tailroom); if (!ddev->nfc_dev) { pr_err("nfc_allocate_device failed\n"); goto free_dev; } nfc_set_drvdata(ddev->nfc_dev, ddev); return ddev; free_dev: kfree(ddev); return NULL; } EXPORT_SYMBOL(nfc_digital_allocate_device); void nfc_digital_free_device(struct nfc_digital_dev *ddev) { nfc_free_device(ddev->nfc_dev); kfree(ddev); } EXPORT_SYMBOL(nfc_digital_free_device); int nfc_digital_register_device(struct nfc_digital_dev *ddev) { return nfc_register_device(ddev->nfc_dev); } EXPORT_SYMBOL(nfc_digital_register_device); void nfc_digital_unregister_device(struct nfc_digital_dev *ddev) { struct digital_cmd *cmd, *n; nfc_unregister_device(ddev->nfc_dev); mutex_lock(&ddev->poll_lock); ddev->poll_tech_count = 0; mutex_unlock(&ddev->poll_lock); cancel_delayed_work_sync(&ddev->poll_work); cancel_work_sync(&ddev->cmd_work); cancel_work_sync(&ddev->cmd_complete_work); list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) { list_del(&cmd->queue); /* Call the command callback if any and pass it a ENODEV error. * This gives a chance to the command issuer to free any * allocated buffer. */ if (cmd->cmd_cb) cmd->cmd_cb(ddev, cmd->cb_context, ERR_PTR(-ENODEV)); kfree(cmd->mdaa_params); kfree(cmd); } } EXPORT_SYMBOL(nfc_digital_unregister_device); MODULE_LICENSE("GPL");
linux-master
net/nfc/digital_core.c
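The allocator above, nfc_digital_allocate_device(), rejects an ops table that lacks the mandatory callbacks (in_configure_hw, in_send_cmd, tg_configure_hw, tg_send_cmd, tg_listen, switch_rf, abort_cmd), so a driver built on the digital stack follows a fixed allocate/register/unregister/free lifecycle. Below is a minimal sketch of that lifecycle; it is not part of the kernel sources, all mydrv_* names are hypothetical, the callback bodies are empty stubs, and the callback signatures are assumptions taken from <net/nfc/digital.h>.

/*
 * Illustrative sketch only -- NOT part of net/nfc/digital_core.c.
 * mydrv_* names are hypothetical; real drivers talk to hardware here.
 */
#include <linux/errno.h>
#include <net/nfc/digital.h>

static int mydrv_configure_hw(struct nfc_digital_dev *ddev, int type,
			      int param)
{
	return 0;	/* program RF technology / framing into the chip */
}

static int mydrv_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb,
			  u16 timeout, nfc_digital_cmd_complete_t cb,
			  void *arg)
{
	return -EOPNOTSUPP;	/* transmit skb, later call cb(ddev, arg, resp) */
}

static int mydrv_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
			   nfc_digital_cmd_complete_t cb, void *arg)
{
	return -EOPNOTSUPP;	/* arm listen mode, report the first frame via cb */
}

static int mydrv_switch_rf(struct nfc_digital_dev *ddev, bool on)
{
	return 0;	/* RF field on/off */
}

static void mydrv_abort_cmd(struct nfc_digital_dev *ddev)
{
	/* cancel the command in flight, if any */
}

/* The same handler shape is reused for initiator and target mode here. */
static const struct nfc_digital_ops mydrv_digital_ops = {
	.in_configure_hw = mydrv_configure_hw,
	.in_send_cmd	 = mydrv_send_cmd,
	.tg_configure_hw = mydrv_configure_hw,
	.tg_send_cmd	 = mydrv_send_cmd,
	.tg_listen	 = mydrv_tg_listen,
	.switch_rf	 = mydrv_switch_rf,
	.abort_cmd	 = mydrv_abort_cmd,
};

static struct nfc_digital_dev *mydrv_ddev;

static int mydrv_probe(void)
{
	int rc;

	/*
	 * driver_capabilities = 0: no hardware CRC support, so the digital
	 * core adds and checks CRCs itself (see the DIGITAL_DRV_CAPS_IN_CRC
	 * tests in digital_target_found() above).
	 */
	mydrv_ddev = nfc_digital_allocate_device(&mydrv_digital_ops,
						 NFC_PROTO_MIFARE_MASK |
						 NFC_PROTO_NFC_DEP_MASK,
						 0, 0, 0);
	if (!mydrv_ddev)
		return -ENOMEM;

	rc = nfc_digital_register_device(mydrv_ddev);
	if (rc)
		nfc_digital_free_device(mydrv_ddev);

	return rc;
}

static void mydrv_remove(void)
{
	nfc_digital_unregister_device(mydrv_ddev);
	nfc_digital_free_device(mydrv_ddev);
}

On removal, nfc_digital_unregister_device() (shown above) flushes the poll and command work and fails any queued commands with -ENODEV before the driver frees the device, which is why the unregister/free pair must be called in that order.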
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Lauro Ramos Venancio <[email protected]> * Aloisio Almeida Jr <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/rfkill.h> #include <linux/nfc.h> #include <net/genetlink.h> #include "nfc.h" #define VERSION "0.1" #define NFC_CHECK_PRES_FREQ_MS 2000 int nfc_devlist_generation; DEFINE_MUTEX(nfc_devlist_mutex); /* NFC device ID bitmap */ static DEFINE_IDA(nfc_index_ida); int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) { int rc = 0; pr_debug("%s do firmware %s\n", dev_name(&dev->dev), firmware_name); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dev_up) { rc = -EBUSY; goto error; } if (!dev->ops->fw_download) { rc = -EOPNOTSUPP; goto error; } dev->fw_download_in_progress = true; rc = dev->ops->fw_download(dev, firmware_name); if (rc) dev->fw_download_in_progress = false; error: device_unlock(&dev->dev); return rc; } /** * nfc_fw_download_done - inform that a firmware download was completed * * @dev: The nfc device to which firmware was downloaded * @firmware_name: The firmware filename * @result: The positive value of a standard errno value */ int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name, u32 result) { dev->fw_download_in_progress = false; return nfc_genl_fw_download_done(dev, firmware_name, result); } EXPORT_SYMBOL(nfc_fw_download_done); /** * nfc_dev_up - turn on the NFC device * * @dev: The nfc device to be turned on * * The device remains up until the nfc_dev_down function is called. */ int nfc_dev_up(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->rfkill && rfkill_blocked(dev->rfkill)) { rc = -ERFKILL; goto error; } if (dev->fw_download_in_progress) { rc = -EBUSY; goto error; } if (dev->dev_up) { rc = -EALREADY; goto error; } if (dev->ops->dev_up) rc = dev->ops->dev_up(dev); if (!rc) dev->dev_up = true; /* We have to enable the device before discovering SEs */ if (dev->ops->discover_se && dev->ops->discover_se(dev)) pr_err("SE discovery failed\n"); error: device_unlock(&dev->dev); return rc; } /** * nfc_dev_down - turn off the NFC device * * @dev: The nfc device to be turned off */ int nfc_dev_down(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -EALREADY; goto error; } if (dev->polling || dev->active_target) { rc = -EBUSY; goto error; } if (dev->ops->dev_down) dev->ops->dev_down(dev); dev->dev_up = false; error: device_unlock(&dev->dev); return rc; } static int nfc_rfkill_set_block(void *data, bool blocked) { struct nfc_dev *dev = data; pr_debug("%s blocked %d", dev_name(&dev->dev), blocked); if (!blocked) return 0; nfc_dev_down(dev); return 0; } static const struct rfkill_ops nfc_rfkill_ops = { .set_block = nfc_rfkill_set_block, }; /** * nfc_start_poll - start polling for nfc targets * * @dev: The nfc device that must start polling * @im_protocols: bitset of nfc initiator protocols to be used for polling * @tm_protocols: bitset of nfc transport protocols to be used for polling * * The device remains polling for targets until a target is found or * the nfc_stop_poll function is called. 
*/ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) { int rc; pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n", dev_name(&dev->dev), im_protocols, tm_protocols); if (!im_protocols && !tm_protocols) return -EINVAL; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (dev->polling) { rc = -EBUSY; goto error; } rc = dev->ops->start_poll(dev, im_protocols, tm_protocols); if (!rc) { dev->polling = true; dev->rf_mode = NFC_RF_NONE; } error: device_unlock(&dev->dev); return rc; } /** * nfc_stop_poll - stop polling for nfc targets * * @dev: The nfc device that must stop polling */ int nfc_stop_poll(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->polling) { rc = -EINVAL; goto error; } dev->ops->stop_poll(dev); dev->polling = false; dev->rf_mode = NFC_RF_NONE; error: device_unlock(&dev->dev); return rc; } static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx) { int i; for (i = 0; i < dev->n_targets; i++) { if (dev->targets[i].idx == target_idx) return &dev->targets[i]; } return NULL; } int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) { int rc = 0; u8 *gb; size_t gb_len; struct nfc_target *target; pr_debug("dev_name=%s comm %d\n", dev_name(&dev->dev), comm_mode); if (!dev->ops->dep_link_up) return -EOPNOTSUPP; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dep_link_up == true) { rc = -EALREADY; goto error; } gb = nfc_llcp_general_bytes(dev, &gb_len); if (gb_len > NFC_MAX_GT_LEN) { rc = -EINVAL; goto error; } target = nfc_find_target(dev, target_index); if (target == NULL) { rc = -ENOTCONN; goto error; } rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len); if (!rc) { dev->active_target = target; dev->rf_mode = NFC_RF_INITIATOR; } error: device_unlock(&dev->dev); return rc; } int nfc_dep_link_down(struct nfc_dev *dev) { int rc = 0; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); if (!dev->ops->dep_link_down) return -EOPNOTSUPP; device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->dep_link_up == false) { rc = -EALREADY; goto error; } rc = dev->ops->dep_link_down(dev); if (!rc) { dev->dep_link_up = false; dev->active_target = NULL; dev->rf_mode = NFC_RF_NONE; nfc_llcp_mac_is_down(dev); nfc_genl_dep_link_down_event(dev); } error: device_unlock(&dev->dev); return rc; } int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode) { dev->dep_link_up = true; if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { struct nfc_target *target; target = nfc_find_target(dev, target_idx); if (target == NULL) return -ENOTCONN; dev->active_target = target; } dev->polling = false; dev->rf_mode = rf_mode; nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode); return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode); } EXPORT_SYMBOL(nfc_dep_link_is_up); /** * nfc_activate_target - prepare the target for data exchange * * @dev: The nfc device that found the target * @target_idx: index of the target that must be activated * @protocol: nfc protocol that will be used for data exchange */ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) { int rc; struct nfc_target *target; pr_debug("dev_name=%s target_idx=%u protocol=%u\n", dev_name(&dev->dev), target_idx, protocol); 
device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->active_target) { rc = -EBUSY; goto error; } target = nfc_find_target(dev, target_idx); if (target == NULL) { rc = -ENOTCONN; goto error; } rc = dev->ops->activate_target(dev, target, protocol); if (!rc) { dev->active_target = target; dev->rf_mode = NFC_RF_INITIATOR; if (dev->ops->check_presence && !dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } error: device_unlock(&dev->dev); return rc; } /** * nfc_deactivate_target - deactivate a nfc target * * @dev: The nfc device that found the target * @target_idx: index of the target that must be deactivated * @mode: idle or sleep? */ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) { int rc = 0; pr_debug("dev_name=%s target_idx=%u\n", dev_name(&dev->dev), target_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (dev->active_target == NULL) { rc = -ENOTCONN; goto error; } if (dev->active_target->idx != target_idx) { rc = -ENOTCONN; goto error; } if (dev->ops->check_presence) del_timer_sync(&dev->check_pres_timer); dev->ops->deactivate_target(dev, dev->active_target, mode); dev->active_target = NULL; error: device_unlock(&dev->dev); return rc; } /** * nfc_data_exchange - transceive data * * @dev: The nfc device that found the target * @target_idx: index of the target * @skb: data to be sent * @cb: callback called when the response is received * @cb_context: parameter for the callback function * * The user must wait for the callback before calling this function again. */ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { int rc; pr_debug("dev_name=%s target_idx=%u skb->len=%u\n", dev_name(&dev->dev), target_idx, skb->len); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; kfree_skb(skb); goto error; } if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) { if (dev->active_target->idx != target_idx) { rc = -EADDRNOTAVAIL; kfree_skb(skb); goto error; } if (dev->ops->check_presence) del_timer_sync(&dev->check_pres_timer); rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, cb_context); if (!rc && dev->ops->check_presence && !dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) { rc = dev->ops->tm_send(dev, skb); } else { rc = -ENOTCONN; kfree_skb(skb); goto error; } error: device_unlock(&dev->dev); return rc; } struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se; list_for_each_entry(se, &dev->secure_elements, list) if (se->idx == se_idx) return se; return NULL; } EXPORT_SYMBOL(nfc_find_se); int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (dev->polling) { rc = -EBUSY; goto error; } if (!dev->ops->enable_se || !dev->ops->disable_se) { rc = -EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state == NFC_SE_ENABLED) { rc = -EALREADY; goto error; } rc = dev->ops->enable_se(dev, se_idx); if (rc >= 0) se->state = NFC_SE_ENABLED; error: device_unlock(&dev->dev); return rc; } int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) { 
struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (dev->shutting_down) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (!dev->ops->enable_se || !dev->ops->disable_se) { rc = -EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state == NFC_SE_DISABLED) { rc = -EALREADY; goto error; } rc = dev->ops->disable_se(dev, se_idx); if (rc >= 0) se->state = NFC_SE_DISABLED; error: device_unlock(&dev->dev); return rc; } int nfc_set_remote_general_bytes(struct nfc_dev *dev, const u8 *gb, u8 gb_len) { pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len); return nfc_llcp_set_remote_gb(dev, gb, gb_len); } EXPORT_SYMBOL(nfc_set_remote_general_bytes); u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len) { pr_debug("dev_name=%s\n", dev_name(&dev->dev)); return nfc_llcp_general_bytes(dev, gb_len); } EXPORT_SYMBOL(nfc_get_local_general_bytes); int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb) { /* Only LLCP target mode for now */ if (dev->dep_link_up == false) { kfree_skb(skb); return -ENOLINK; } return nfc_llcp_data_received(dev, skb); } EXPORT_SYMBOL(nfc_tm_data_received); int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode, const u8 *gb, size_t gb_len) { int rc; device_lock(&dev->dev); dev->polling = false; if (gb != NULL) { rc = nfc_set_remote_general_bytes(dev, gb, gb_len); if (rc < 0) goto out; } dev->rf_mode = NFC_RF_TARGET; if (protocol == NFC_PROTO_NFC_DEP_MASK) nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET); rc = nfc_genl_tm_activated(dev, protocol); out: device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_tm_activated); int nfc_tm_deactivated(struct nfc_dev *dev) { dev->dep_link_up = false; dev->rf_mode = NFC_RF_NONE; return nfc_genl_tm_deactivated(dev); } EXPORT_SYMBOL(nfc_tm_deactivated); /** * nfc_alloc_send_skb - allocate a skb for data exchange responses * * @dev: device sending the response * @sk: socket sending the response * @flags: MSG_DONTWAIT flag * @size: size to allocate * @err: pointer to memory to store the error code */ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, unsigned int flags, unsigned int size, unsigned int *err) { struct sk_buff *skb; unsigned int total_size; total_size = size + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err); if (skb) skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); return skb; } /** * nfc_alloc_recv_skb - allocate a skb for data exchange responses * * @size: size to allocate * @gfp: gfp flags */ struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp) { struct sk_buff *skb; unsigned int total_size; total_size = size + 1; skb = alloc_skb(total_size, gfp); if (skb) skb_reserve(skb, 1); return skb; } EXPORT_SYMBOL(nfc_alloc_recv_skb); /** * nfc_targets_found - inform that targets were found * * @dev: The nfc device that found the targets * @targets: array of nfc targets found * @n_targets: targets array size * * The device driver must call this function when one or many nfc targets * are found. After calling this function, the device driver must stop * polling for targets. * NOTE: This function can be called with targets=NULL and n_targets=0 to * notify a driver error, meaning that the polling operation cannot complete. * IMPORTANT: this function must not be called from an atomic context. 
* In addition, it must also not be called from a context that would prevent * the NFC Core to call other nfc ops entry point concurrently. */ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int n_targets) { int i; pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); for (i = 0; i < n_targets; i++) targets[i].idx = dev->target_next_idx++; device_lock(&dev->dev); if (dev->polling == false) { device_unlock(&dev->dev); return 0; } dev->polling = false; dev->targets_generation++; kfree(dev->targets); dev->targets = NULL; if (targets) { dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target), GFP_ATOMIC); if (!dev->targets) { dev->n_targets = 0; device_unlock(&dev->dev); return -ENOMEM; } } dev->n_targets = n_targets; device_unlock(&dev->dev); nfc_genl_targets_found(dev); return 0; } EXPORT_SYMBOL(nfc_targets_found); /** * nfc_target_lost - inform that an activated target went out of field * * @dev: The nfc device that had the activated target in field * @target_idx: the nfc index of the target * * The device driver must call this function when the activated target * goes out of the field. * IMPORTANT: this function must not be called from an atomic context. * In addition, it must also not be called from a context that would prevent * the NFC Core to call other nfc ops entry point concurrently. */ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx) { const struct nfc_target *tg; int i; pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx); device_lock(&dev->dev); for (i = 0; i < dev->n_targets; i++) { tg = &dev->targets[i]; if (tg->idx == target_idx) break; } if (i == dev->n_targets) { device_unlock(&dev->dev); return -EINVAL; } dev->targets_generation++; dev->n_targets--; dev->active_target = NULL; if (dev->n_targets) { memcpy(&dev->targets[i], &dev->targets[i + 1], (dev->n_targets - i) * sizeof(struct nfc_target)); } else { kfree(dev->targets); dev->targets = NULL; } device_unlock(&dev->dev); nfc_genl_target_lost(dev, target_idx); return 0; } EXPORT_SYMBOL(nfc_target_lost); inline void nfc_driver_failure(struct nfc_dev *dev, int err) { nfc_targets_found(dev, NULL, 0); } EXPORT_SYMBOL(nfc_driver_failure); int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); se = nfc_find_se(dev, se_idx); if (se) return -EALREADY; se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL); if (!se) return -ENOMEM; se->idx = se_idx; se->type = type; se->state = NFC_SE_DISABLED; INIT_LIST_HEAD(&se->list); list_add(&se->list, &dev->secure_elements); rc = nfc_genl_se_added(dev, se_idx, type); if (rc < 0) { list_del(&se->list); kfree(se); return rc; } return 0; } EXPORT_SYMBOL(nfc_add_se); int nfc_remove_se(struct nfc_dev *dev, u32 se_idx) { struct nfc_se *se, *n; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); list_for_each_entry_safe(se, n, &dev->secure_elements, list) if (se->idx == se_idx) { rc = nfc_genl_se_removed(dev, se_idx); if (rc < 0) return rc; list_del(&se->list); kfree(se); return 0; } return -EINVAL; } EXPORT_SYMBOL(nfc_remove_se); int nfc_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction) { int rc; pr_debug("transaction: %x\n", se_idx); device_lock(&dev->dev); if (!evt_transaction) { rc = -EPROTO; goto out; } rc = nfc_genl_se_transaction(dev, se_idx, evt_transaction); out: device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_se_transaction); int nfc_se_connectivity(struct nfc_dev 
*dev, u8 se_idx) { int rc; pr_debug("connectivity: %x\n", se_idx); device_lock(&dev->dev); rc = nfc_genl_se_connectivity(dev, se_idx); device_unlock(&dev->dev); return rc; } EXPORT_SYMBOL(nfc_se_connectivity); static void nfc_release(struct device *d) { struct nfc_dev *dev = to_nfc_dev(d); struct nfc_se *se, *n; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); nfc_genl_data_exit(&dev->genl_data); kfree(dev->targets); list_for_each_entry_safe(se, n, &dev->secure_elements, list) { nfc_genl_se_removed(dev, se->idx); list_del(&se->list); kfree(se); } ida_free(&nfc_index_ida, dev->idx); kfree(dev); } static void nfc_check_pres_work(struct work_struct *work) { struct nfc_dev *dev = container_of(work, struct nfc_dev, check_pres_work); int rc; device_lock(&dev->dev); if (dev->active_target && timer_pending(&dev->check_pres_timer) == 0) { rc = dev->ops->check_presence(dev, dev->active_target); if (rc == -EOPNOTSUPP) goto exit; if (rc) { u32 active_target_idx = dev->active_target->idx; device_unlock(&dev->dev); nfc_target_lost(dev, active_target_idx); return; } if (!dev->shutting_down) mod_timer(&dev->check_pres_timer, jiffies + msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS)); } exit: device_unlock(&dev->dev); } static void nfc_check_pres_timeout(struct timer_list *t) { struct nfc_dev *dev = from_timer(dev, t, check_pres_timer); schedule_work(&dev->check_pres_work); } struct class nfc_class = { .name = "nfc", .dev_release = nfc_release, }; EXPORT_SYMBOL(nfc_class); static int match_idx(struct device *d, const void *data) { struct nfc_dev *dev = to_nfc_dev(d); const unsigned int *idx = data; return dev->idx == *idx; } struct nfc_dev *nfc_get_device(unsigned int idx) { struct device *d; d = class_find_device(&nfc_class, NULL, &idx, match_idx); if (!d) return NULL; return to_nfc_dev(d); } /** * nfc_allocate_device - allocate a new nfc device * * @ops: device operations * @supported_protocols: NFC protocols supported by the device * @tx_headroom: reserved space at beginning of skb * @tx_tailroom: reserved space at end of skb */ struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops, u32 supported_protocols, int tx_headroom, int tx_tailroom) { struct nfc_dev *dev; int rc; if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || !ops->deactivate_target || !ops->im_transceive) return NULL; if (!supported_protocols) return NULL; dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL); if (!dev) return NULL; rc = ida_alloc(&nfc_index_ida, GFP_KERNEL); if (rc < 0) goto err_free_dev; dev->idx = rc; dev->dev.class = &nfc_class; dev_set_name(&dev->dev, "nfc%d", dev->idx); device_initialize(&dev->dev); dev->ops = ops; dev->supported_protocols = supported_protocols; dev->tx_headroom = tx_headroom; dev->tx_tailroom = tx_tailroom; INIT_LIST_HEAD(&dev->secure_elements); nfc_genl_data_init(&dev->genl_data); dev->rf_mode = NFC_RF_NONE; /* first generation must not be 0 */ dev->targets_generation = 1; if (ops->check_presence) { timer_setup(&dev->check_pres_timer, nfc_check_pres_timeout, 0); INIT_WORK(&dev->check_pres_work, nfc_check_pres_work); } return dev; err_free_dev: kfree(dev); return NULL; } EXPORT_SYMBOL(nfc_allocate_device); /** * nfc_register_device - register a nfc device in the nfc subsystem * * @dev: The nfc device to register */ int nfc_register_device(struct nfc_dev *dev) { int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; rc = device_add(&dev->dev); mutex_unlock(&nfc_devlist_mutex); if (rc < 0) return rc; rc = 
nfc_llcp_register_device(dev); if (rc) pr_err("Could not register llcp device\n"); device_lock(&dev->dev); dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev, RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev); if (dev->rfkill) { if (rfkill_register(dev->rfkill) < 0) { rfkill_destroy(dev->rfkill); dev->rfkill = NULL; } } dev->shutting_down = false; device_unlock(&dev->dev); rc = nfc_genl_device_added(dev); if (rc) pr_debug("The userspace won't be notified that the device %s was added\n", dev_name(&dev->dev)); return 0; } EXPORT_SYMBOL(nfc_register_device); /** * nfc_unregister_device - unregister a nfc device in the nfc subsystem * * @dev: The nfc device to unregister */ void nfc_unregister_device(struct nfc_dev *dev) { int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); rc = nfc_genl_device_removed(dev); if (rc) pr_debug("The userspace won't be notified that the device %s " "was removed\n", dev_name(&dev->dev)); device_lock(&dev->dev); if (dev->rfkill) { rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); dev->rfkill = NULL; } dev->shutting_down = true; device_unlock(&dev->dev); if (dev->ops->check_presence) { del_timer_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); } nfc_llcp_unregister_device(dev); mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; device_del(&dev->dev); mutex_unlock(&nfc_devlist_mutex); } EXPORT_SYMBOL(nfc_unregister_device); static int __init nfc_init(void) { int rc; pr_info("NFC Core ver %s\n", VERSION); rc = class_register(&nfc_class); if (rc) return rc; rc = nfc_genl_init(); if (rc) goto err_genl; /* the first generation must not be 0 */ nfc_devlist_generation = 1; rc = rawsock_init(); if (rc) goto err_rawsock; rc = nfc_llcp_init(); if (rc) goto err_llcp_sock; rc = af_nfc_init(); if (rc) goto err_af_nfc; return 0; err_af_nfc: nfc_llcp_exit(); err_llcp_sock: rawsock_exit(); err_rawsock: nfc_genl_exit(); err_genl: class_unregister(&nfc_class); return rc; } static void __exit nfc_exit(void) { af_nfc_exit(); nfc_llcp_exit(); rawsock_exit(); nfc_genl_exit(); class_unregister(&nfc_class); } subsys_initcall(nfc_init); module_exit(nfc_exit); MODULE_AUTHOR("Lauro Ramos Venancio <[email protected]>"); MODULE_DESCRIPTION("NFC Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_NFC); MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
linux-master
net/nfc/core.c
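nfc_allocate_device() above refuses an ops table that is missing start_poll, stop_poll, activate_target, deactivate_target or im_transceive, and nfc_targets_found() is the entry point a driver uses to report discovered tags. The following skeleton shows that registration path; it is a sketch only, the mydrv_* names are invented for illustration, and the callback signatures are inferred from the digital_nfc_ops handlers in digital_core.c above.

/* Illustrative sketch only -- NOT part of net/nfc/core.c. */
#include <linux/errno.h>
#include <net/nfc/nfc.h>

static int mydrv_start_poll(struct nfc_dev *dev, u32 im_protocols,
			    u32 tm_protocols)
{
	return 0;	/* start RF discovery in hardware */
}

static void mydrv_stop_poll(struct nfc_dev *dev)
{
	/* stop RF discovery */
}

static int mydrv_activate_target(struct nfc_dev *dev,
				 struct nfc_target *target, u32 protocol)
{
	return 0;	/* select the tag for the requested protocol */
}

static void mydrv_deactivate_target(struct nfc_dev *dev,
				    struct nfc_target *target, u8 mode)
{
	/* put the tag to sleep or release it */
}

static int mydrv_im_transceive(struct nfc_dev *dev, struct nfc_target *target,
			       struct sk_buff *skb, data_exchange_cb_t cb,
			       void *cb_context)
{
	return -EOPNOTSUPP;	/* send skb, invoke cb(cb_context, resp, rc) later */
}

static const struct nfc_ops mydrv_nfc_ops = {
	.start_poll	   = mydrv_start_poll,
	.stop_poll	   = mydrv_stop_poll,
	.activate_target   = mydrv_activate_target,
	.deactivate_target = mydrv_deactivate_target,
	.im_transceive	   = mydrv_im_transceive,
};

static int mydrv_register(void)
{
	struct nfc_dev *dev;
	int rc;

	dev = nfc_allocate_device(&mydrv_nfc_ops, NFC_PROTO_MIFARE_MASK,
				  0 /* tx_headroom */, 0 /* tx_tailroom */);
	if (!dev)
		return -ENOMEM;

	rc = nfc_register_device(dev);
	if (rc)
		nfc_free_device(dev);

	return rc;
}

When the hardware later detects a tag, such a driver fills in a struct nfc_target and calls nfc_targets_found(dev, &target, 1); as the kernel-doc above states, this must not be done from atomic context, and targets=NULL with n_targets=0 reports a driver failure instead.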
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Aloisio Almeida Jr <[email protected]>
 *    Lauro Ramos Venancio <[email protected]>
 */

#include <linux/nfc.h>
#include <linux/module.h>

#include "nfc.h"

static DEFINE_RWLOCK(proto_tab_lock);
static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];

static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
			   int kern)
{
	int rc = -EPROTONOSUPPORT;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= NFC_SOCKPROTO_MAX)
		return -EINVAL;

	read_lock(&proto_tab_lock);
	if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) {
		rc = proto_tab[proto]->create(net, sock, proto_tab[proto], kern);
		module_put(proto_tab[proto]->owner);
	}
	read_unlock(&proto_tab_lock);

	return rc;
}

static const struct net_proto_family nfc_sock_family_ops = {
	.owner  = THIS_MODULE,
	.family = PF_NFC,
	.create = nfc_sock_create,
};

int nfc_proto_register(const struct nfc_protocol *nfc_proto)
{
	int rc;

	if (nfc_proto->id < 0 || nfc_proto->id >= NFC_SOCKPROTO_MAX)
		return -EINVAL;

	rc = proto_register(nfc_proto->proto, 0);
	if (rc)
		return rc;

	write_lock(&proto_tab_lock);
	if (proto_tab[nfc_proto->id])
		rc = -EBUSY;
	else
		proto_tab[nfc_proto->id] = nfc_proto;
	write_unlock(&proto_tab_lock);

	if (rc)
		proto_unregister(nfc_proto->proto);

	return rc;
}
EXPORT_SYMBOL(nfc_proto_register);

void nfc_proto_unregister(const struct nfc_protocol *nfc_proto)
{
	write_lock(&proto_tab_lock);
	proto_tab[nfc_proto->id] = NULL;
	write_unlock(&proto_tab_lock);

	proto_unregister(nfc_proto->proto);
}
EXPORT_SYMBOL(nfc_proto_unregister);

int __init af_nfc_init(void)
{
	return sock_register(&nfc_sock_family_ops);
}

void __exit af_nfc_exit(void)
{
	sock_unregister(PF_NFC);
}
linux-master
net/nfc/af_nfc.c
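af_nfc.c above dispatches socket creation through proto_tab[], which protocol modules populate via nfc_proto_register(). The sketch below shows how an in-tree protocol (rawsock or LLCP style) would plug into that table; it is illustrative only, the my_* names are hypothetical, and the struct nfc_protocol layout is an assumption based on the internal net/nfc/nfc.h header, which is not shown here.

/*
 * Illustrative sketch only -- NOT part of net/nfc/af_nfc.c.
 * Assumed struct nfc_protocol layout: { id, proto, owner, create }.
 */
#include <linux/module.h>
#include <net/sock.h>

#include "nfc.h"	/* struct nfc_protocol, nfc_proto_register() */

static struct proto my_proto = {
	.name	  = "NFC_MY",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int my_sock_create(struct net *net, struct socket *sock,
			  const struct nfc_protocol *nfc_proto, int kern)
{
	/* allocate a struct sock with sk_alloc(), set sock->ops, etc. */
	return -ENOMEM;
}

static const struct nfc_protocol my_nfc_proto = {
	.id	= NFC_SOCKPROTO_RAW,	/* must be < NFC_SOCKPROTO_MAX */
	.proto	= &my_proto,
	.owner	= THIS_MODULE,
	.create	= my_sock_create,
};

static int __init my_proto_init(void)
{
	/* returns -EBUSY if the slot is already taken, as rawsock takes it in-tree */
	return nfc_proto_register(&my_nfc_proto);
}

static void __exit my_proto_exit(void)
{
	nfc_proto_unregister(&my_nfc_proto);
}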
// SPDX-License-Identifier: GPL-2.0-only /* * NFC Digital Protocol stack * Copyright (c) 2013, Intel Corporation. */ #define pr_fmt(fmt) "digital: %s: " fmt, __func__ #include "digital.h" #define DIGITAL_CMD_SENS_REQ 0x26 #define DIGITAL_CMD_ALL_REQ 0x52 #define DIGITAL_CMD_SEL_REQ_CL1 0x93 #define DIGITAL_CMD_SEL_REQ_CL2 0x95 #define DIGITAL_CMD_SEL_REQ_CL3 0x97 #define DIGITAL_SDD_REQ_SEL_PAR 0x20 #define DIGITAL_SDD_RES_CT 0x88 #define DIGITAL_SDD_RES_LEN 5 #define DIGITAL_SEL_RES_LEN 1 #define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04)) #define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60)) #define DIGITAL_SEL_RES_IS_T4T(sel_res) ((sel_res) & 0x20) #define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40) #define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00) #define DIGITAL_SENS_RES_IS_VALID(sens_res) \ ((!((sens_res) & 0x001F) && (((sens_res) & 0x0C00) == 0x0C00)) || \ (((sens_res) & 0x001F) && ((sens_res) & 0x0C00) != 0x0C00)) #define DIGITAL_MIFARE_READ_RES_LEN 16 #define DIGITAL_MIFARE_ACK_RES 0x0A #define DIGITAL_CMD_SENSB_REQ 0x05 #define DIGITAL_SENSB_ADVANCED BIT(5) #define DIGITAL_SENSB_EXTENDED BIT(4) #define DIGITAL_SENSB_ALLB_REQ BIT(3) #define DIGITAL_SENSB_N(n) ((n) & 0x7) #define DIGITAL_CMD_SENSB_RES 0x50 #define DIGITAL_CMD_ATTRIB_REQ 0x1D #define DIGITAL_ATTRIB_P1_TR0_DEFAULT (0x0 << 6) #define DIGITAL_ATTRIB_P1_TR1_DEFAULT (0x0 << 4) #define DIGITAL_ATTRIB_P1_SUPRESS_EOS BIT(3) #define DIGITAL_ATTRIB_P1_SUPRESS_SOS BIT(2) #define DIGITAL_ATTRIB_P2_LISTEN_POLL_1 (0x0 << 6) #define DIGITAL_ATTRIB_P2_POLL_LISTEN_1 (0x0 << 4) #define DIGITAL_ATTRIB_P2_MAX_FRAME_256 0x8 #define DIGITAL_ATTRIB_P4_DID(n) ((n) & 0xf) #define DIGITAL_CMD_SENSF_REQ 0x00 #define DIGITAL_CMD_SENSF_RES 0x01 #define DIGITAL_SENSF_RES_MIN_LENGTH 17 #define DIGITAL_SENSF_RES_RD_AP_B1 0x00 #define DIGITAL_SENSF_RES_RD_AP_B2 0x8F #define DIGITAL_SENSF_REQ_RC_NONE 0 #define DIGITAL_SENSF_REQ_RC_SC 1 #define DIGITAL_SENSF_REQ_RC_AP 2 #define DIGITAL_CMD_ISO15693_INVENTORY_REQ 0x01 #define DIGITAL_ISO15693_REQ_FLAG_DATA_RATE BIT(1) #define DIGITAL_ISO15693_REQ_FLAG_INVENTORY BIT(2) #define DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS BIT(5) #define DIGITAL_ISO15693_RES_FLAG_ERROR BIT(0) #define DIGITAL_ISO15693_RES_IS_VALID(flags) \ (!((flags) & DIGITAL_ISO15693_RES_FLAG_ERROR)) #define DIGITAL_ISO_DEP_I_PCB 0x02 #define DIGITAL_ISO_DEP_PNI(pni) ((pni) & 0x01) #define DIGITAL_ISO_DEP_PCB_TYPE(pcb) ((pcb) & 0xC0) #define DIGITAL_ISO_DEP_I_BLOCK 0x00 #define DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb) ((pcb) & 0x08) static const u8 digital_ats_fsc[] = { 16, 24, 32, 40, 48, 64, 96, 128, }; #define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F) #define DIGITAL_SENSB_FSCI(pi2) (((pi2) & 0xF0) >> 4) #define DIGITAL_ATS_MAX_FSC 256 #define DIGITAL_RATS_BYTE1 0xE0 #define DIGITAL_RATS_PARAM 0x80 struct digital_sdd_res { u8 nfcid1[4]; u8 bcc; } __packed; struct digital_sel_req { u8 sel_cmd; u8 b2; u8 nfcid1[4]; u8 bcc; } __packed; struct digital_sensb_req { u8 cmd; u8 afi; u8 param; } __packed; struct digital_sensb_res { u8 cmd; u8 nfcid0[4]; u8 app_data[4]; u8 proto_info[3]; } __packed; struct digital_attrib_req { u8 cmd; u8 nfcid0[4]; u8 param1; u8 param2; u8 param3; u8 param4; } __packed; struct digital_attrib_res { u8 mbli_did; } __packed; struct digital_sensf_req { u8 cmd; u8 sc1; u8 sc2; u8 rc; u8 tsn; } __packed; struct digital_sensf_res { u8 cmd; u8 nfcid2[8]; u8 pad0[2]; u8 pad1[3]; u8 mrti_check; u8 mrti_update; u8 pad2; u8 rd[2]; } __packed; struct digital_iso15693_inv_req { u8 
flags; u8 cmd; u8 mask_len; u64 mask; } __packed; struct digital_iso15693_inv_res { u8 flags; u8 dsfid; u64 uid; } __packed; static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev, struct nfc_target *target); int digital_in_iso_dep_pull_sod(struct nfc_digital_dev *ddev, struct sk_buff *skb) { u8 pcb; u8 block_type; if (skb->len < 1) return -EIO; pcb = *skb->data; block_type = DIGITAL_ISO_DEP_PCB_TYPE(pcb); /* No support fo R-block nor S-block */ if (block_type != DIGITAL_ISO_DEP_I_BLOCK) { pr_err("ISO_DEP R-block and S-block not supported\n"); return -EIO; } if (DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb)) { pr_err("DID field in ISO_DEP PCB not supported\n"); return -EIO; } skb_pull(skb, 1); return 0; } int digital_in_iso_dep_push_sod(struct nfc_digital_dev *ddev, struct sk_buff *skb) { /* * Chaining not supported so skb->len + 1 PCB byte + 2 CRC bytes must * not be greater than remote FSC */ if (skb->len + 3 > ddev->target_fsc) return -EIO; skb_push(skb, 1); *skb->data = DIGITAL_ISO_DEP_I_PCB | ddev->curr_nfc_dep_pni; ddev->curr_nfc_dep_pni = DIGITAL_ISO_DEP_PNI(ddev->curr_nfc_dep_pni + 1); return 0; } static void digital_in_recv_ats(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; u8 fsdi; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len < 2) { rc = -EIO; goto exit; } fsdi = DIGITAL_ATS_FSCI(resp->data[1]); if (fsdi >= 8) ddev->target_fsc = DIGITAL_ATS_MAX_FSC; else ddev->target_fsc = digital_ats_fsc[fsdi]; ddev->curr_nfc_dep_pni = 0; rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443); exit: dev_kfree_skb(resp); kfree(target); if (rc) digital_poll_next_tech(ddev); } static int digital_in_send_rats(struct nfc_digital_dev *ddev, struct nfc_target *target) { int rc; struct sk_buff *skb; skb = digital_skb_alloc(ddev, 2); if (!skb) return -ENOMEM; skb_put_u8(skb, DIGITAL_RATS_BYTE1); skb_put_u8(skb, DIGITAL_RATS_PARAM); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_ats, target); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; int rc; u8 sel_res; u8 nfc_proto; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) { rc = digital_skb_check_crc_a(resp); if (rc) { PROTOCOL_ERR("4.4.1.3"); goto exit; } } if (resp->len != DIGITAL_SEL_RES_LEN) { rc = -EIO; goto exit; } sel_res = resp->data[0]; if (!DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res)) { rc = digital_in_send_sdd_req(ddev, target); if (rc) goto exit; goto exit_free_skb; } target->sel_res = sel_res; if (DIGITAL_SEL_RES_IS_T2T(sel_res)) { nfc_proto = NFC_PROTO_MIFARE; } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) { nfc_proto = NFC_PROTO_NFC_DEP; } else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) { rc = digital_in_send_rats(ddev, target); if (rc) goto exit; /* * Skip target_found and don't free it for now. 
This will be * done when receiving the ATS */ goto exit_free_skb; } else { rc = -EOPNOTSUPP; goto exit; } rc = digital_target_found(ddev, target, nfc_proto); exit: kfree(target); exit_free_skb: dev_kfree_skb(resp); if (rc) digital_poll_next_tech(ddev); } static int digital_in_send_sel_req(struct nfc_digital_dev *ddev, struct nfc_target *target, struct digital_sdd_res *sdd_res) { struct sk_buff *skb; struct digital_sel_req *sel_req; u8 sel_cmd; int rc; skb = digital_skb_alloc(ddev, sizeof(struct digital_sel_req)); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct digital_sel_req)); sel_req = (struct digital_sel_req *)skb->data; if (target->nfcid1_len <= 4) sel_cmd = DIGITAL_CMD_SEL_REQ_CL1; else if (target->nfcid1_len < 10) sel_cmd = DIGITAL_CMD_SEL_REQ_CL2; else sel_cmd = DIGITAL_CMD_SEL_REQ_CL3; sel_req->sel_cmd = sel_cmd; sel_req->b2 = 0x70; memcpy(sel_req->nfcid1, sdd_res->nfcid1, 4); sel_req->bcc = sdd_res->bcc; if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) { rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A); if (rc) goto exit; } else { digital_skb_add_crc_a(skb); } rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sel_res, target); exit: if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_sdd_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; struct digital_sdd_res *sdd_res; int rc; u8 offset, size; u8 i, bcc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len < DIGITAL_SDD_RES_LEN) { PROTOCOL_ERR("4.7.2.8"); rc = -EINVAL; goto exit; } sdd_res = (struct digital_sdd_res *)resp->data; for (i = 0, bcc = 0; i < 4; i++) bcc ^= sdd_res->nfcid1[i]; if (bcc != sdd_res->bcc) { PROTOCOL_ERR("4.7.2.6"); rc = -EINVAL; goto exit; } if (sdd_res->nfcid1[0] == DIGITAL_SDD_RES_CT) { offset = 1; size = 3; } else { offset = 0; size = 4; } memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset, size); target->nfcid1_len += size; rc = digital_in_send_sel_req(ddev, target, sdd_res); exit: dev_kfree_skb(resp); if (rc) { kfree(target); digital_poll_next_tech(ddev); } } static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev, struct nfc_target *target) { int rc; struct sk_buff *skb; u8 sel_cmd; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_STANDARD); if (rc) return rc; skb = digital_skb_alloc(ddev, 2); if (!skb) return -ENOMEM; if (target->nfcid1_len == 0) sel_cmd = DIGITAL_CMD_SEL_REQ_CL1; else if (target->nfcid1_len == 3) sel_cmd = DIGITAL_CMD_SEL_REQ_CL2; else sel_cmd = DIGITAL_CMD_SEL_REQ_CL3; skb_put_u8(skb, sel_cmd); skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res, target); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = NULL; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len < sizeof(u16)) { rc = -EIO; goto exit; } target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); if (!target) { rc = -ENOMEM; goto exit; } target->sens_res = __le16_to_cpu(*(__le16 *)resp->data); if (!DIGITAL_SENS_RES_IS_VALID(target->sens_res)) { PROTOCOL_ERR("4.6.3.3"); rc = -EINVAL; goto exit; } if (DIGITAL_SENS_RES_IS_T1T(target->sens_res)) rc = digital_target_found(ddev, target, NFC_PROTO_JEWEL); else rc = digital_in_send_sdd_req(ddev, target); exit: dev_kfree_skb(resp); if (rc) { kfree(target); 
digital_poll_next_tech(ddev); } } int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech) { struct sk_buff *skb; int rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106A); if (rc) return rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_SHORT); if (rc) return rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, DIGITAL_CMD_SENS_REQ); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL); if (rc) kfree_skb(skb); return rc; } int digital_in_recv_mifare_res(struct sk_buff *resp) { /* Successful READ command response is 16 data bytes + 2 CRC bytes long. * Since the driver can't differentiate a ACK/NACK response from a valid * READ response, the CRC calculation must be handled at digital level * even if the driver supports it for this technology. */ if (resp->len == DIGITAL_MIFARE_READ_RES_LEN + DIGITAL_CRC_LEN) { if (digital_skb_check_crc_a(resp)) { PROTOCOL_ERR("9.4.1.2"); return -EIO; } return 0; } /* ACK response (i.e. successful WRITE). */ if (resp->len == 1 && resp->data[0] == DIGITAL_MIFARE_ACK_RES) { resp->data[0] = 0; return 0; } /* NACK and any other responses are treated as error. */ return -EIO; } static void digital_in_recv_attrib_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; struct digital_attrib_res *attrib_res; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len < sizeof(*attrib_res)) { PROTOCOL_ERR("12.6.2"); rc = -EIO; goto exit; } attrib_res = (struct digital_attrib_res *)resp->data; if (attrib_res->mbli_did & 0x0f) { PROTOCOL_ERR("12.6.2.1"); rc = -EIO; goto exit; } rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443_B); exit: dev_kfree_skb(resp); kfree(target); if (rc) digital_poll_next_tech(ddev); } static int digital_in_send_attrib_req(struct nfc_digital_dev *ddev, struct nfc_target *target, struct digital_sensb_res *sensb_res) { struct digital_attrib_req *attrib_req; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, sizeof(*attrib_req)); if (!skb) return -ENOMEM; attrib_req = skb_put(skb, sizeof(*attrib_req)); attrib_req->cmd = DIGITAL_CMD_ATTRIB_REQ; memcpy(attrib_req->nfcid0, sensb_res->nfcid0, sizeof(attrib_req->nfcid0)); attrib_req->param1 = DIGITAL_ATTRIB_P1_TR0_DEFAULT | DIGITAL_ATTRIB_P1_TR1_DEFAULT; attrib_req->param2 = DIGITAL_ATTRIB_P2_LISTEN_POLL_1 | DIGITAL_ATTRIB_P2_POLL_LISTEN_1 | DIGITAL_ATTRIB_P2_MAX_FRAME_256; attrib_req->param3 = sensb_res->proto_info[1] & 0x07; attrib_req->param4 = DIGITAL_ATTRIB_P4_DID(0); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_attrib_res, target); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_sensb_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = NULL; struct digital_sensb_res *sensb_res; u8 fsci; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len != sizeof(*sensb_res)) { PROTOCOL_ERR("5.6.2.1"); rc = -EIO; goto exit; } sensb_res = (struct digital_sensb_res *)resp->data; if (sensb_res->cmd != DIGITAL_CMD_SENSB_RES) { PROTOCOL_ERR("5.6.2"); rc = -EIO; goto exit; } if (!(sensb_res->proto_info[1] & BIT(0))) { PROTOCOL_ERR("5.6.2.12"); rc = -EIO; goto exit; } if (sensb_res->proto_info[1] & BIT(3)) { PROTOCOL_ERR("5.6.2.16"); rc = -EIO; goto exit; } fsci = DIGITAL_SENSB_FSCI(sensb_res->proto_info[1]); if (fsci >= 8) ddev->target_fsc = DIGITAL_ATS_MAX_FSC; else 
ddev->target_fsc = digital_ats_fsc[fsci]; target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); if (!target) { rc = -ENOMEM; goto exit; } rc = digital_in_send_attrib_req(ddev, target, sensb_res); exit: dev_kfree_skb(resp); if (rc) { kfree(target); digital_poll_next_tech(ddev); } } int digital_in_send_sensb_req(struct nfc_digital_dev *ddev, u8 rf_tech) { struct digital_sensb_req *sensb_req; struct sk_buff *skb; int rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106B); if (rc) return rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCB); if (rc) return rc; skb = digital_skb_alloc(ddev, sizeof(*sensb_req)); if (!skb) return -ENOMEM; sensb_req = skb_put(skb, sizeof(*sensb_req)); sensb_req->cmd = DIGITAL_CMD_SENSB_REQ; sensb_req->afi = 0x00; /* All families and sub-families */ sensb_req->param = DIGITAL_SENSB_N(0); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensb_res, NULL); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; u8 proto; struct nfc_target target; struct digital_sensf_res *sensf_res; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (resp->len < DIGITAL_SENSF_RES_MIN_LENGTH) { rc = -EIO; goto exit; } if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) { rc = digital_skb_check_crc_f(resp); if (rc) { PROTOCOL_ERR("6.4.1.8"); goto exit; } } skb_pull(resp, 1); memset(&target, 0, sizeof(struct nfc_target)); sensf_res = (struct digital_sensf_res *)resp->data; memcpy(target.sensf_res, sensf_res, resp->len); target.sensf_res_len = resp->len; memcpy(target.nfcid2, sensf_res->nfcid2, NFC_NFCID2_MAXSIZE); target.nfcid2_len = NFC_NFCID2_MAXSIZE; if (target.nfcid2[0] == DIGITAL_SENSF_NFCID2_NFC_DEP_B1 && target.nfcid2[1] == DIGITAL_SENSF_NFCID2_NFC_DEP_B2) proto = NFC_PROTO_NFC_DEP; else proto = NFC_PROTO_FELICA; rc = digital_target_found(ddev, &target, proto); exit: dev_kfree_skb(resp); if (rc) digital_poll_next_tech(ddev); } int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech) { struct digital_sensf_req *sensf_req; struct sk_buff *skb; int rc; u8 size; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); if (rc) return rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCF); if (rc) return rc; size = sizeof(struct digital_sensf_req); skb = digital_skb_alloc(ddev, size); if (!skb) return -ENOMEM; skb_put(skb, size); sensf_req = (struct digital_sensf_req *)skb->data; sensf_req->cmd = DIGITAL_CMD_SENSF_REQ; sensf_req->sc1 = 0xFF; sensf_req->sc2 = 0xFF; sensf_req->rc = 0; sensf_req->tsn = 0; *(u8 *)skb_push(skb, 1) = size + 1; if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) digital_skb_add_crc_f(skb); rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensf_res, NULL); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_iso15693_inv_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct digital_iso15693_inv_res *res; struct nfc_target *target = NULL; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto out_free_skb; } if (resp->len != sizeof(*res)) { rc = -EIO; goto out_free_skb; } res = (struct digital_iso15693_inv_res *)resp->data; if (!DIGITAL_ISO15693_RES_IS_VALID(res->flags)) { PROTOCOL_ERR("ISO15693 - 10.3.1"); rc = -EINVAL; goto out_free_skb; } target = kzalloc(sizeof(*target), GFP_KERNEL); if (!target) { rc = -ENOMEM; goto out_free_skb; } target->is_iso15693 = 1; 
target->iso15693_dsfid = res->dsfid; memcpy(target->iso15693_uid, &res->uid, sizeof(target->iso15693_uid)); rc = digital_target_found(ddev, target, NFC_PROTO_ISO15693); kfree(target); out_free_skb: dev_kfree_skb(resp); if (rc) digital_poll_next_tech(ddev); } int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech) { struct digital_iso15693_inv_req *req; struct sk_buff *skb; int rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_ISO15693); if (rc) return rc; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_ISO15693_INVENTORY); if (rc) return rc; skb = digital_skb_alloc(ddev, sizeof(*req)); if (!skb) return -ENOMEM; skb_put(skb, sizeof(*req) - sizeof(req->mask)); /* No mask */ req = (struct digital_iso15693_inv_req *)skb->data; /* Single sub-carrier, high data rate, no AFI, single slot * Inventory command */ req->flags = DIGITAL_ISO15693_REQ_FLAG_DATA_RATE | DIGITAL_ISO15693_REQ_FLAG_INVENTORY | DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS; req->cmd = DIGITAL_CMD_ISO15693_INVENTORY_REQ; req->mask_len = 0; rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_iso15693_inv_res, NULL); if (rc) kfree_skb(skb); return rc; } static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev) { struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, DIGITAL_SEL_RES_NFC_DEP); if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) digital_skb_add_crc_a(skb); rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE); if (rc) { kfree_skb(skb); return rc; } rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req, NULL); if (rc) kfree_skb(skb); return rc; } static void digital_tg_recv_sel_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) { rc = digital_skb_check_crc_a(resp); if (rc) { PROTOCOL_ERR("4.4.1.3"); goto exit; } } /* Silently ignore SEL_REQ content and send a SEL_RES for NFC-DEP */ rc = digital_tg_send_sel_res(ddev); exit: if (rc) digital_poll_next_tech(ddev); dev_kfree_skb(resp); } static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev) { struct sk_buff *skb; struct digital_sdd_res *sdd_res; int rc, i; skb = digital_skb_alloc(ddev, sizeof(struct digital_sdd_res)); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct digital_sdd_res)); sdd_res = (struct digital_sdd_res *)skb->data; sdd_res->nfcid1[0] = 0x08; get_random_bytes(sdd_res->nfcid1 + 1, 3); sdd_res->bcc = 0; for (i = 0; i < 4; i++) sdd_res->bcc ^= sdd_res->nfcid1[i]; rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A); if (rc) { kfree_skb(skb); return rc; } rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req, NULL); if (rc) kfree_skb(skb); return rc; } static void digital_tg_recv_sdd_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { u8 *sdd_req; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } sdd_req = resp->data; if (resp->len < 2 || sdd_req[0] != DIGITAL_CMD_SEL_REQ_CL1 || sdd_req[1] != DIGITAL_SDD_REQ_SEL_PAR) { rc = -EINVAL; goto exit; } rc = digital_tg_send_sdd_res(ddev); exit: if (rc) digital_poll_next_tech(ddev); dev_kfree_skb(resp); } static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev) { struct sk_buff *skb; u8 *sens_res; int rc; skb = digital_skb_alloc(ddev, 2); if (!skb) return -ENOMEM; sens_res = 
skb_put(skb, 2); sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF; sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF; rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_STANDARD); if (rc) { kfree_skb(skb); return rc; } rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req, NULL); if (rc) kfree_skb(skb); return rc; } void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { u8 sens_req; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } sens_req = resp->data[0]; if (!resp->len || (sens_req != DIGITAL_CMD_SENS_REQ && sens_req != DIGITAL_CMD_ALL_REQ)) { rc = -EINVAL; goto exit; } rc = digital_tg_send_sens_res(ddev); exit: if (rc) digital_poll_next_tech(ddev); dev_kfree_skb(resp); } static void digital_tg_recv_atr_or_sensf_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { if (!IS_ERR(resp) && (resp->len >= 2) && (resp->data[1] == DIGITAL_CMD_SENSF_REQ)) digital_tg_recv_sensf_req(ddev, arg, resp); else digital_tg_recv_atr_req(ddev, arg, resp); return; } static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev, struct digital_sensf_req *sensf_req) { struct sk_buff *skb; u8 size; int rc; struct digital_sensf_res *sensf_res; size = sizeof(struct digital_sensf_res); if (sensf_req->rc == DIGITAL_SENSF_REQ_RC_NONE) size -= sizeof(sensf_res->rd); skb = digital_skb_alloc(ddev, size); if (!skb) return -ENOMEM; skb_put(skb, size); sensf_res = (struct digital_sensf_res *)skb->data; memset(sensf_res, 0, size); sensf_res->cmd = DIGITAL_CMD_SENSF_RES; sensf_res->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1; sensf_res->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2; get_random_bytes(&sensf_res->nfcid2[2], 6); switch (sensf_req->rc) { case DIGITAL_SENSF_REQ_RC_SC: sensf_res->rd[0] = sensf_req->sc1; sensf_res->rd[1] = sensf_req->sc2; break; case DIGITAL_SENSF_REQ_RC_AP: sensf_res->rd[0] = DIGITAL_SENSF_RES_RD_AP_B1; sensf_res->rd[1] = DIGITAL_SENSF_RES_RD_AP_B2; break; } *(u8 *)skb_push(skb, sizeof(u8)) = size + 1; if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) digital_skb_add_crc_f(skb); rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_or_sensf_req, NULL); if (rc) kfree_skb(skb); return rc; } void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct digital_sensf_req *sensf_req; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) { rc = digital_skb_check_crc_f(resp); if (rc) { PROTOCOL_ERR("6.4.1.8"); goto exit; } } if (resp->len != sizeof(struct digital_sensf_req) + 1) { rc = -EINVAL; goto exit; } skb_pull(resp, 1); sensf_req = (struct digital_sensf_req *)resp->data; if (sensf_req->cmd != DIGITAL_CMD_SENSF_REQ) { rc = -EINVAL; goto exit; } rc = digital_tg_send_sensf_res(ddev, sensf_req); exit: if (rc) digital_poll_next_tech(ddev); dev_kfree_skb(resp); } static int digital_tg_config_nfca(struct nfc_digital_dev *ddev) { int rc; rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106A); if (rc) return rc; return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); } int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech) { int rc; rc = digital_tg_config_nfca(ddev); if (rc) return rc; return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL); } static int digital_tg_config_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) { int rc; rc = digital_tg_configure_hw(ddev, 
NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); if (rc) return rc; return digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCF_NFC_DEP); } int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech) { int rc; rc = digital_tg_config_nfcf(ddev, rf_tech); if (rc) return rc; return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, NULL); } void digital_tg_recv_md_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { u8 rf_tech; int rc; if (IS_ERR(resp)) { resp = NULL; goto exit_free_skb; } rc = ddev->ops->tg_get_rf_tech(ddev, &rf_tech); if (rc) goto exit_free_skb; switch (rf_tech) { case NFC_DIGITAL_RF_TECH_106A: rc = digital_tg_config_nfca(ddev); if (rc) goto exit_free_skb; digital_tg_recv_sens_req(ddev, arg, resp); break; case NFC_DIGITAL_RF_TECH_212F: case NFC_DIGITAL_RF_TECH_424F: rc = digital_tg_config_nfcf(ddev, rf_tech); if (rc) goto exit_free_skb; digital_tg_recv_sensf_req(ddev, arg, resp); break; default: goto exit_free_skb; } return; exit_free_skb: digital_poll_next_tech(ddev); dev_kfree_skb(resp); }
linux-master
net/nfc/digital_technology.c
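One detail worth isolating from digital_in_recv_sdd_res() above is the NFC-A anticollision check byte: the BCC of an SDD_RES must equal the XOR of the four NFCID1 cascade bytes, which is the test reported as PROTOCOL_ERR("4.7.2.6") when it fails. A small standalone restatement of that check, written for illustration only (the helper name is hypothetical):

/* Illustrative helper only -- NOT part of net/nfc/digital_technology.c. */
#include <linux/types.h>

static bool nfca_sdd_res_bcc_ok(const u8 nfcid1[4], u8 bcc)
{
	/* BCC is the XOR of the four NFCID1 bytes of this cascade level */
	u8 x = nfcid1[0] ^ nfcid1[1] ^ nfcid1[2] ^ nfcid1[3];

	return x == bcc;
}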
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Lauro Ramos Venancio <[email protected]> * Aloisio Almeida Jr <[email protected]> * * Vendor commands implementation based on net/wireless/nl80211.c * which is: * * Copyright 2006-2010 Johannes Berg <[email protected]> * Copyright 2013-2014 Intel Mobile Communications GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/genetlink.h> #include <linux/nfc.h> #include <linux/slab.h> #include "nfc.h" #include "llcp.h" static const struct genl_multicast_group nfc_genl_mcgrps[] = { { .name = NFC_GENL_MCAST_EVENT_NAME, }, }; static struct genl_family nfc_genl_family; static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { [NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING, .len = NFC_DEVICE_NAME_MAXSIZE }, [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 }, [NFC_ATTR_RF_MODE] = { .type = NLA_U8 }, [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 }, [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 }, [NFC_ATTR_LLC_PARAM_LTO] = { .type = NLA_U8 }, [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 }, [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 }, [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED }, [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING, .len = NFC_FIRMWARE_NAME_MAXSIZE }, [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 }, [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY }, [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 }, [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 }, [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY }, }; static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { [NFC_SDP_ATTR_URI] = { .type = NLA_STRING, .len = U8_MAX - 4 }, [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, }; static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, struct netlink_callback *cb, int flags) { void *hdr; hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &nfc_genl_family, flags, NFC_CMD_GET_TARGET); if (!hdr) return -EMSGSIZE; genl_dump_check_consistent(cb, hdr); if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) || nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) || nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res)) goto nla_put_failure; if (target->nfcid1_len > 0 && nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, target->nfcid1)) goto nla_put_failure; if (target->sensb_res_len > 0 && nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, target->sensb_res)) goto nla_put_failure; if (target->sensf_res_len > 0 && nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, target->sensf_res)) goto nla_put_failure; if (target->is_iso15693) { if (nla_put_u8(msg, NFC_ATTR_TARGET_ISO15693_DSFID, target->iso15693_dsfid) || nla_put(msg, NFC_ATTR_TARGET_ISO15693_UID, sizeof(target->iso15693_uid), target->iso15693_uid)) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb) { const struct genl_dumpit_info *info = genl_dumpit_info(cb); struct nfc_dev *dev; u32 idx; if (!info->info.attrs[NFC_ATTR_DEVICE_INDEX]) return ERR_PTR(-EINVAL); idx = nla_get_u32(info->info.attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if 
(!dev) return ERR_PTR(-ENODEV); return dev; } static int nfc_genl_dump_targets(struct sk_buff *skb, struct netlink_callback *cb) { int i = cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; int rc; if (!dev) { dev = __get_device_from_cb(cb); if (IS_ERR(dev)) return PTR_ERR(dev); cb->args[1] = (long) dev; } device_lock(&dev->dev); cb->seq = dev->targets_generation; while (i < dev->n_targets) { rc = nfc_genl_send_target(skb, &dev->targets[i], cb, NLM_F_MULTI); if (rc < 0) break; i++; } device_unlock(&dev->dev); cb->args[0] = i; return skb->len; } static int nfc_genl_dump_targets_done(struct netlink_callback *cb) { struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; if (dev) nfc_put_device(dev); return 0; } int nfc_genl_targets_found(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; dev->genl_data.poll_req_portid = 0; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TARGETS_FOUND); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TARGET_LOST); if (!hdr) goto free_msg; if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TM_ACTIVATED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_tm_deactivated(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_TM_DEACTIVATED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_setup_device_added(struct nfc_dev *dev, struct sk_buff *msg) { if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) || nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) || nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) return -1; return 0; } int nfc_genl_device_added(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_DEVICE_ADDED); if (!hdr) goto 
free_msg; if (nfc_genl_setup_device_added(dev, msg)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_device_removed(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_DEVICE_REMOVED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list) { struct sk_buff *msg; struct nlattr *sdp_attr, *uri_attr; struct nfc_llcp_sdp_tlv *sdres; struct hlist_node *n; void *hdr; int rc = -EMSGSIZE; int i; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_LLC_SDRES); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; sdp_attr = nla_nest_start_noflag(msg, NFC_ATTR_LLC_SDP); if (sdp_attr == NULL) { rc = -ENOMEM; goto nla_put_failure; } i = 1; hlist_for_each_entry_safe(sdres, n, sdres_list, node) { pr_debug("uri: %s, sap: %d\n", sdres->uri, sdres->sap); uri_attr = nla_nest_start_noflag(msg, i++); if (uri_attr == NULL) { rc = -ENOMEM; goto nla_put_failure; } if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap)) goto nla_put_failure; if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri)) goto nla_put_failure; nla_nest_end(msg, uri_attr); hlist_del(&sdres->node); nfc_llcp_free_sdp_tlv(sdres); } nla_nest_end(msg, sdp_attr); genlmsg_end(msg, hdr); return genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); nla_put_failure: free_msg: nlmsg_free(msg); nfc_llcp_free_sdp_tlv_list(sdres_list); return rc; } int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_ADDED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, type)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_REMOVED); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_transaction(struct nfc_dev *dev, u8 se_idx, struct nfc_evt_transaction *evt_transaction) { struct nfc_se *se; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_TRANSACTION); if (!hdr) goto free_msg; se = nfc_find_se(dev, se_idx); if (!se) goto free_msg; if (nla_put_u32(msg, 
NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) || nla_put(msg, NFC_ATTR_SE_AID, evt_transaction->aid_len, evt_transaction->aid) || nla_put(msg, NFC_ATTR_SE_PARAMS, evt_transaction->params_len, evt_transaction->params)) goto nla_put_failure; /* evt_transaction is no more used */ devm_kfree(&dev->dev, evt_transaction); genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: /* evt_transaction is no more used */ devm_kfree(&dev->dev, evt_transaction); nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_se_connectivity(struct nfc_dev *dev, u8 se_idx) { const struct nfc_se *se; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_EVENT_SE_CONNECTIVITY); if (!hdr) goto free_msg; se = nfc_find_se(dev, se_idx); if (!se) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, NFC_CMD_GET_DEVICE); if (!hdr) return -EMSGSIZE; if (cb) genl_dump_check_consistent(cb, hdr); if (nfc_genl_setup_device_added(dev, msg)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nfc_genl_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); if (!iter) return -ENOMEM; cb->args[0] = (long) iter; } mutex_lock(&nfc_devlist_mutex); cb->seq = nfc_devlist_generation; if (first_call) { nfc_device_iter_init(iter); dev = nfc_device_iter_next(iter); } while (dev) { int rc; rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (rc < 0) break; dev = nfc_device_iter_next(iter); } mutex_unlock(&nfc_devlist_mutex); cb->args[1] = (long) dev; return skb->len; } static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; if (iter) { nfc_device_iter_exit(iter); kfree(iter); } return 0; } int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode) { struct sk_buff *msg; void *hdr; pr_debug("DEP link is up\n"); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_UP); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (rf_mode == NFC_RF_INITIATOR && nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx)) goto nla_put_failure; if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) || nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode)) goto nla_put_failure; genlmsg_end(msg, hdr); dev->dep_link_up = true; genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; nla_put_failure: free_msg: 
nlmsg_free(msg); return -EMSGSIZE; } int nfc_genl_dep_link_down_event(struct nfc_dev *dev) { struct sk_buff *msg; void *hdr; pr_debug("DEP link is down\n"); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_DEP_LINK_DOWN); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct nfc_dev *dev; u32 idx; int rc = -ENOBUFS; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; goto out_putdev; } rc = nfc_genl_send_device(msg, dev, info->snd_portid, info->snd_seq, NULL, 0); if (rc < 0) goto out_free; nfc_put_device(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_putdev: nfc_put_device(dev); return rc; } static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dev_up(dev); nfc_put_device(dev); return rc; } static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dev_down(dev); nfc_put_device(dev); return rc; } static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; u32 im_protocols = 0, tm_protocols = 0; pr_debug("Poll start\n"); if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] && !info->attrs[NFC_ATTR_PROTOCOLS]) && !info->attrs[NFC_ATTR_TM_PROTOCOLS])) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); if (info->attrs[NFC_ATTR_TM_PROTOCOLS]) tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]); if (info->attrs[NFC_ATTR_IM_PROTOCOLS]) im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]); else if (info->attrs[NFC_ATTR_PROTOCOLS]) im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; mutex_lock(&dev->genl_data.genl_data_mutex); rc = nfc_start_poll(dev, im_protocols, tm_protocols); if (!rc) dev->genl_data.poll_req_portid = info->snd_portid; mutex_unlock(&dev->genl_data.genl_data_mutex); nfc_put_device(dev); return rc; } static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); if (!dev->polling) { device_unlock(&dev->dev); nfc_put_device(dev); return -EINVAL; } device_unlock(&dev->dev); mutex_lock(&dev->genl_data.genl_data_mutex); if (dev->genl_data.poll_req_portid != info->snd_portid) { rc = -EBUSY; goto out; } rc = nfc_stop_poll(dev); dev->genl_data.poll_req_portid = 0; out: mutex_unlock(&dev->genl_data.genl_data_mutex); 
nfc_put_device(dev); return rc; } static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; u32 device_idx, target_idx, protocol; int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_TARGET_INDEX] || !info->attrs[NFC_ATTR_PROTOCOLS]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(device_idx); if (!dev) return -ENODEV; target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); protocol = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]); nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP); rc = nfc_activate_target(dev, target_idx, protocol); nfc_put_device(dev); return rc; } static int nfc_genl_deactivate_target(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; u32 device_idx, target_idx; int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(device_idx); if (!dev) return -ENODEV; target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); rc = nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP); nfc_put_device(dev); return rc; } static int nfc_genl_dep_link_up(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc, tgt_idx; u32 idx; u8 comm; pr_debug("DEP link up\n"); if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_COMM_MODE]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); if (!info->attrs[NFC_ATTR_TARGET_INDEX]) tgt_idx = NFC_TARGET_IDX_ANY; else tgt_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]); comm = nla_get_u8(info->attrs[NFC_ATTR_COMM_MODE]); if (comm != NFC_COMM_ACTIVE && comm != NFC_COMM_PASSIVE) return -EINVAL; dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dep_link_up(dev, tgt_idx, comm); nfc_put_device(dev); return rc; } static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_TARGET_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_dep_link_down(dev); nfc_put_device(dev); return rc; } static int nfc_genl_send_params(struct sk_buff *msg, struct nfc_llcp_local *local, u32 portid, u32 seq) { void *hdr; hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, 0, NFC_CMD_LLC_GET_PARAMS); if (!hdr) return -EMSGSIZE; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, local->dev->idx) || nla_put_u8(msg, NFC_ATTR_LLC_PARAM_LTO, local->lto) || nla_put_u8(msg, NFC_ATTR_LLC_PARAM_RW, local->rw) || nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux))) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; int rc = 0; struct sk_buff *msg = NULL; u32 idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); local = nfc_llcp_find_local(dev); if (!local) { rc = -ENODEV; goto exit; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; goto put_local; } rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); 
put_local: nfc_llcp_local_put(local); exit: device_unlock(&dev->dev); nfc_put_device(dev); if (rc < 0) { if (msg) nlmsg_free(msg); return rc; } return genlmsg_reply(msg, info); } static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; u8 rw = 0; u16 miux = 0; u32 idx; int rc = 0; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || (!info->attrs[NFC_ATTR_LLC_PARAM_LTO] && !info->attrs[NFC_ATTR_LLC_PARAM_RW] && !info->attrs[NFC_ATTR_LLC_PARAM_MIUX])) return -EINVAL; if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) { rw = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_RW]); if (rw > LLCP_MAX_RW) return -EINVAL; } if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) { miux = nla_get_u16(info->attrs[NFC_ATTR_LLC_PARAM_MIUX]); if (miux > LLCP_MAX_MIUX) return -EINVAL; } idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); local = nfc_llcp_find_local(dev); if (!local) { rc = -ENODEV; goto exit; } if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) { if (dev->dep_link_up) { rc = -EINPROGRESS; goto put_local; } local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]); } if (info->attrs[NFC_ATTR_LLC_PARAM_RW]) local->rw = rw; if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) local->miux = cpu_to_be16(miux); put_local: nfc_llcp_local_put(local); exit: device_unlock(&dev->dev); nfc_put_device(dev); return rc; } static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct nfc_llcp_local *local; struct nlattr *attr, *sdp_attrs[NFC_SDP_ATTR_MAX+1]; u32 idx; u8 tid; char *uri; int rc = 0, rem; size_t uri_len, tlvs_len; struct hlist_head sdreq_list; struct nfc_llcp_sdp_tlv *sdreq; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_LLC_SDP]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; device_lock(&dev->dev); if (dev->dep_link_up == false) { rc = -ENOLINK; goto exit; } local = nfc_llcp_find_local(dev); if (!local) { rc = -ENODEV; goto exit; } INIT_HLIST_HEAD(&sdreq_list); tlvs_len = 0; nla_for_each_nested(attr, info->attrs[NFC_ATTR_LLC_SDP], rem) { rc = nla_parse_nested_deprecated(sdp_attrs, NFC_SDP_ATTR_MAX, attr, nfc_sdp_genl_policy, info->extack); if (rc != 0) { rc = -EINVAL; goto put_local; } if (!sdp_attrs[NFC_SDP_ATTR_URI]) continue; uri_len = nla_len(sdp_attrs[NFC_SDP_ATTR_URI]); if (uri_len == 0) continue; uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]); if (uri == NULL || *uri == 0) continue; tid = local->sdreq_next_tid++; sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len); if (sdreq == NULL) { rc = -ENOMEM; goto put_local; } tlvs_len += sdreq->tlv_len; hlist_add_head(&sdreq->node, &sdreq_list); } if (hlist_empty(&sdreq_list)) { rc = -EINVAL; goto put_local; } rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len); put_local: nfc_llcp_local_put(local); exit: device_unlock(&dev->dev); nfc_put_device(dev); return rc; } static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx; char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; nla_strscpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME], sizeof(firmware_name)); rc = nfc_fw_download(dev, firmware_name); nfc_put_device(dev); return rc; } int 
nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, u32 result) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_FW_DOWNLOAD); if (!hdr) goto free_msg; if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) || nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) || nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; nla_put_failure: free_msg: nlmsg_free(msg); return -EMSGSIZE; } static int nfc_genl_enable_se(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx, se_idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_enable_se(dev, se_idx); nfc_put_device(dev); return rc; } static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; int rc; u32 idx, se_idx; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX]) return -EINVAL; idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(idx); if (!dev) return -ENODEV; rc = nfc_disable_se(dev, se_idx); nfc_put_device(dev); return rc; } static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; struct nfc_se *se, *n; list_for_each_entry_safe(se, n, &dev->secure_elements, list) { hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags, NFC_CMD_GET_SE); if (!hdr) goto nla_put_failure; if (cb) genl_dump_check_consistent(cb, hdr); if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) || nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) goto nla_put_failure; genlmsg_end(msg, hdr); } return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nfc_genl_dump_ses(struct sk_buff *skb, struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); if (!iter) return -ENOMEM; cb->args[0] = (long) iter; } mutex_lock(&nfc_devlist_mutex); cb->seq = nfc_devlist_generation; if (first_call) { nfc_device_iter_init(iter); dev = nfc_device_iter_next(iter); } while (dev) { int rc; rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (rc < 0) break; dev = nfc_device_iter_next(iter); } mutex_unlock(&nfc_devlist_mutex); cb->args[1] = (long) dev; return skb->len; } static int nfc_genl_dump_ses_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; if (iter) { nfc_device_iter_exit(iter); kfree(iter); } return 0; } static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct nfc_se *se; int rc; pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx); device_lock(&dev->dev); if (!device_is_registered(&dev->dev)) { rc = -ENODEV; goto error; } if (!dev->dev_up) { rc = -ENODEV; goto error; } if (!dev->ops->se_io) { rc = 
-EOPNOTSUPP; goto error; } se = nfc_find_se(dev, se_idx); if (!se) { rc = -EINVAL; goto error; } if (se->state != NFC_SE_ENABLED) { rc = -ENODEV; goto error; } rc = dev->ops->se_io(dev, se_idx, apdu, apdu_length, cb, cb_context); device_unlock(&dev->dev); return rc; error: device_unlock(&dev->dev); kfree(cb_context); return rc; } struct se_io_ctx { u32 dev_idx; u32 se_idx; }; static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err) { struct se_io_ctx *ctx = context; struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { kfree(ctx); return; } hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0, NFC_CMD_SE_IO); if (!hdr) goto free_msg; if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) || nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) || nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu)) goto nla_put_failure; genlmsg_end(msg, hdr); genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); kfree(ctx); return; nla_put_failure: free_msg: nlmsg_free(msg); kfree(ctx); return; } static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; struct se_io_ctx *ctx; u32 dev_idx, se_idx; u8 *apdu; size_t apdu_len; int rc; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_SE_INDEX] || !info->attrs[NFC_ATTR_SE_APDU]) return -EINVAL; dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]); dev = nfc_get_device(dev_idx); if (!dev) return -ENODEV; if (!dev->ops || !dev->ops->se_io) { rc = -EOPNOTSUPP; goto put_dev; } apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]); if (apdu_len == 0) { rc = -EINVAL; goto put_dev; } apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]); if (!apdu) { rc = -EINVAL; goto put_dev; } ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto put_dev; } ctx->dev_idx = dev_idx; ctx->se_idx = se_idx; rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx); put_dev: nfc_put_device(dev); return rc; } static int nfc_genl_vendor_cmd(struct sk_buff *skb, struct genl_info *info) { struct nfc_dev *dev; const struct nfc_vendor_cmd *cmd; u32 dev_idx, vid, subcmd; u8 *data; size_t data_len; int i, err; if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_VENDOR_ID] || !info->attrs[NFC_ATTR_VENDOR_SUBCMD]) return -EINVAL; dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); vid = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_ID]); subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]); dev = nfc_get_device(dev_idx); if (!dev) return -ENODEV; if (!dev->vendor_cmds || !dev->n_vendor_cmds) { err = -ENODEV; goto put_dev; } if (info->attrs[NFC_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]); if (data_len == 0) { err = -EINVAL; goto put_dev; } } else { data = NULL; data_len = 0; } for (i = 0; i < dev->n_vendor_cmds; i++) { cmd = &dev->vendor_cmds[i]; if (cmd->vendor_id != vid || cmd->subcmd != subcmd) continue; dev->cur_cmd_info = info; err = cmd->doit(dev, data, data_len); dev->cur_cmd_info = NULL; goto put_dev; } err = -EOPNOTSUPP; put_dev: nfc_put_device(dev); return err; } /* message building helper */ static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd); } static struct sk_buff * __nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen, u32 
portid, u32 seq, enum nfc_attrs attr, u32 oui, u32 subcmd, gfp_t gfp) { struct sk_buff *skb; void *hdr; skb = nlmsg_new(approxlen + 100, gfp); if (!skb) return NULL; hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR); if (!hdr) { kfree_skb(skb); return NULL; } if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx)) goto nla_put_failure; if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui)) goto nla_put_failure; if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd)) goto nla_put_failure; ((void **)skb->cb)[0] = dev; ((void **)skb->cb)[1] = hdr; return skb; nla_put_failure: kfree_skb(skb); return NULL; } struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev, enum nfc_attrs attr, u32 oui, u32 subcmd, int approxlen) { if (WARN_ON(!dev->cur_cmd_info)) return NULL; return __nfc_alloc_vendor_cmd_skb(dev, approxlen, dev->cur_cmd_info->snd_portid, dev->cur_cmd_info->snd_seq, attr, oui, subcmd, GFP_KERNEL); } EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb); int nfc_vendor_cmd_reply(struct sk_buff *skb) { struct nfc_dev *dev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; /* clear CB data for netlink core to own from now on */ memset(skb->cb, 0, sizeof(skb->cb)); if (WARN_ON(!dev->cur_cmd_info)) { kfree_skb(skb); return -EINVAL; } genlmsg_end(skb, hdr); return genlmsg_reply(skb, dev->cur_cmd_info); } EXPORT_SYMBOL(nfc_vendor_cmd_reply); static const struct genl_ops nfc_genl_ops[] = { { .cmd = NFC_CMD_GET_DEVICE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_get_device, .dumpit = nfc_genl_dump_devices, .done = nfc_genl_dump_devices_done, }, { .cmd = NFC_CMD_DEV_UP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_dev_up, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_DEV_DOWN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_dev_down, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_START_POLL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_start_poll, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_STOP_POLL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_stop_poll, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_DEP_LINK_UP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_dep_link_up, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_DEP_LINK_DOWN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_dep_link_down, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_GET_TARGET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = nfc_genl_dump_targets, .done = nfc_genl_dump_targets_done, }, { .cmd = NFC_CMD_LLC_GET_PARAMS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_llc_get_params, }, { .cmd = NFC_CMD_LLC_SET_PARAMS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_llc_set_params, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_LLC_SDREQ, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_llc_sdreq, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_FW_DOWNLOAD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_fw_download, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_ENABLE_SE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_enable_se, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_DISABLE_SE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = 
nfc_genl_disable_se, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_GET_SE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .dumpit = nfc_genl_dump_ses, .done = nfc_genl_dump_ses_done, }, { .cmd = NFC_CMD_SE_IO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_se_io, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_ACTIVATE_TARGET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_activate_target, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_VENDOR, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_vendor_cmd, .flags = GENL_ADMIN_PERM, }, { .cmd = NFC_CMD_DEACTIVATE_TARGET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nfc_genl_deactivate_target, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family nfc_genl_family __ro_after_init = { .hdrsize = 0, .name = NFC_GENL_NAME, .version = NFC_GENL_VERSION, .maxattr = NFC_ATTR_MAX, .policy = nfc_genl_policy, .module = THIS_MODULE, .ops = nfc_genl_ops, .n_ops = ARRAY_SIZE(nfc_genl_ops), .resv_start_op = NFC_CMD_DEACTIVATE_TARGET + 1, .mcgrps = nfc_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(nfc_genl_mcgrps), }; struct urelease_work { struct work_struct w; u32 portid; }; static void nfc_urelease_event_work(struct work_struct *work) { struct urelease_work *w = container_of(work, struct urelease_work, w); struct class_dev_iter iter; struct nfc_dev *dev; pr_debug("portid %d\n", w->portid); mutex_lock(&nfc_devlist_mutex); nfc_device_iter_init(&iter); dev = nfc_device_iter_next(&iter); while (dev) { mutex_lock(&dev->genl_data.genl_data_mutex); if (dev->genl_data.poll_req_portid == w->portid) { nfc_stop_poll(dev); dev->genl_data.poll_req_portid = 0; } mutex_unlock(&dev->genl_data.genl_data_mutex); dev = nfc_device_iter_next(&iter); } nfc_device_iter_exit(&iter); mutex_unlock(&nfc_devlist_mutex); kfree(w); } static int nfc_genl_rcv_nl_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netlink_notify *n = ptr; struct urelease_work *w; if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC) goto out; pr_debug("NETLINK_URELEASE event from id %d\n", n->portid); w = kmalloc(sizeof(*w), GFP_ATOMIC); if (w) { INIT_WORK(&w->w, nfc_urelease_event_work); w->portid = n->portid; schedule_work(&w->w); } out: return NOTIFY_DONE; } void nfc_genl_data_init(struct nfc_genl_data *genl_data) { genl_data->poll_req_portid = 0; mutex_init(&genl_data->genl_data_mutex); } void nfc_genl_data_exit(struct nfc_genl_data *genl_data) { mutex_destroy(&genl_data->genl_data_mutex); } static struct notifier_block nl_notifier = { .notifier_call = nfc_genl_rcv_nl_event, }; /** * nfc_genl_init() - Initialize netlink interface * * This initialization function registers the nfc netlink family. */ int __init nfc_genl_init(void) { int rc; rc = genl_register_family(&nfc_genl_family); if (rc) return rc; netlink_register_notifier(&nl_notifier); return 0; } /** * nfc_genl_exit() - Deinitialize netlink interface * * This exit function unregisters the nfc netlink family. */ void nfc_genl_exit(void) { netlink_unregister_notifier(&nl_notifier); genl_unregister_family(&nfc_genl_family); }
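The family registered above is consumed from userspace over generic netlink. As a usage illustration, here is a small client sketch assuming libnl-3 (libnl-genl-3): it resolves the "nfc" family, issues NFC_CMD_DEV_UP for device index 0 (handled by nfc_genl_dev_up() above), and joins the "events" multicast group so it receives the NFC_EVENT_* notifications broadcast by the functions above. Error handling and attribute parsing are trimmed for brevity; this program is not part of the kernel tree.

/* Userspace sketch, assuming libnl-3: drive the "nfc" genetlink family. */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, grp, err;

	if (!sk || genl_connect(sk))
		return 1;

	/* Resolve the family id registered by nfc_genl_init(). */
	family = genl_ctrl_resolve(sk, NFC_GENL_NAME);		/* "nfc" */
	if (family < 0)
		return 1;

	/* Power up NFC device 0 (NFC_CMD_DEV_UP requires admin rights). */
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFC_CMD_DEV_UP, NFC_GENL_VERSION);
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, 0);
	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	if (err < 0)
		return 1;

	/* Subscribe to the "events" multicast group to see NFC_EVENT_*
	 * notifications (device added/removed, targets found, ...). */
	grp = genl_ctrl_resolve_grp(sk, NFC_GENL_NAME,
				    NFC_GENL_MCAST_EVENT_NAME);
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		return 1;

	printf("listening for nfc events\n");
	while (nl_recvmsgs_default(sk) == 0)
		;

	nl_socket_free(sk);
	return 0;
}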
linux-master
net/nfc/netlink.c
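The NFC-DEP initiator/target code in the next row derives its response-waiting-time constants (DIGITAL_ATR_RES_RWT and the digital_rwt_map table) from the formulas quoted in its own comments, with f(c) = 13.56 MHz. The small standalone program below simply re-runs that arithmetic as a sanity check; it is illustrative userspace code, not part of the kernel.

/* Check of the NFC-DEP waiting-time arithmetic; formulas and f(c) are
 * taken from the comments in the NFC digital DEP code that follows. */
#include <stdio.h>

int main(void)
{
	const double fc = 13560000.0;		/* carrier frequency f(c), Hz */
	const double drwt = 16.0 / fc;		/* dRWT(nfcdep), seconds      */
	const double dt_initiator = 0.100;	/* dT(nfcdep,initiator), s    */
	int wt;

	/* RWT(ATR_RES) = 4096 * 2^12 / f(c) + dRWT + dT ~= 1.337 s,
	 * i.e. the DIGITAL_ATR_RES_RWT value of 1337 ms. */
	double atr_res_rwt = 4096.0 * 4096.0 / fc + drwt + dt_initiator;
	printf("RWT(ATR_RES) = %.0f ms\n", atr_res_rwt * 1000.0);

	/* max_rwt = (256 * 16 / f(c)) * 2^wt + dRWT + dT for 0 <= wt <= 14;
	 * rounding these reproduces the 15 entries of digital_rwt_map
	 * (100 .. 5049 ms). */
	for (wt = 0; wt <= 14; wt++) {
		double rwt = (4096.0 * (1u << wt)) / fc + drwt + dt_initiator;
		printf("wt=%2d  rwt = %4.0f ms\n", wt, rwt * 1000.0);
	}
	return 0;
}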
// SPDX-License-Identifier: GPL-2.0-only /* * NFC Digital Protocol stack * Copyright (c) 2013, Intel Corporation. */ #define pr_fmt(fmt) "digital: %s: " fmt, __func__ #include "digital.h" #define DIGITAL_NFC_DEP_N_RETRY_NACK 2 #define DIGITAL_NFC_DEP_N_RETRY_ATN 2 #define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4 #define DIGITAL_NFC_DEP_FRAME_DIR_IN 0xD5 #define DIGITAL_NFC_DEP_NFCA_SOD_SB 0xF0 #define DIGITAL_CMD_ATR_REQ 0x00 #define DIGITAL_CMD_ATR_RES 0x01 #define DIGITAL_CMD_PSL_REQ 0x04 #define DIGITAL_CMD_PSL_RES 0x05 #define DIGITAL_CMD_DEP_REQ 0x06 #define DIGITAL_CMD_DEP_RES 0x07 #define DIGITAL_ATR_REQ_MIN_SIZE 16 #define DIGITAL_ATR_REQ_MAX_SIZE 64 #define DIGITAL_ATR_RES_TO_WT(s) ((s) & 0xF) #define DIGITAL_DID_MAX 14 #define DIGITAL_PAYLOAD_SIZE_MAX 254 #define DIGITAL_PAYLOAD_BITS_TO_PP(s) (((s) & 0x3) << 4) #define DIGITAL_PAYLOAD_PP_TO_BITS(s) (((s) >> 4) & 0x3) #define DIGITAL_PAYLOAD_BITS_TO_FSL(s) ((s) & 0x3) #define DIGITAL_PAYLOAD_FSL_TO_BITS(s) ((s) & 0x3) #define DIGITAL_GB_BIT 0x02 #define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0) #define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10 #define DIGITAL_NFC_DEP_PFB_MI_BIT 0x10 #define DIGITAL_NFC_DEP_PFB_NACK_BIT 0x10 #define DIGITAL_NFC_DEP_PFB_DID_BIT 0x04 #define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \ ((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT) #define DIGITAL_NFC_DEP_MI_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_MI_BIT) #define DIGITAL_NFC_DEP_NACK_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_NACK_BIT) #define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08) #define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & DIGITAL_NFC_DEP_PFB_DID_BIT) #define DIGITAL_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03) #define DIGITAL_NFC_DEP_RTOX_VALUE(data) ((data) & 0x3F) #define DIGITAL_NFC_DEP_RTOX_MAX 59 #define DIGITAL_NFC_DEP_PFB_I_PDU 0x00 #define DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU 0x40 #define DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU 0x80 struct digital_atr_req { u8 dir; u8 cmd; u8 nfcid3[10]; u8 did; u8 bs; u8 br; u8 pp; u8 gb[]; } __packed; struct digital_atr_res { u8 dir; u8 cmd; u8 nfcid3[10]; u8 did; u8 bs; u8 br; u8 to; u8 pp; u8 gb[]; } __packed; struct digital_psl_req { u8 dir; u8 cmd; u8 did; u8 brs; u8 fsl; } __packed; struct digital_psl_res { u8 dir; u8 cmd; u8 did; } __packed; struct digital_dep_req_res { u8 dir; u8 cmd; u8 pfb; } __packed; static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp); static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp); static const u8 digital_payload_bits_map[4] = { [0] = 64, [1] = 128, [2] = 192, [3] = 254 }; /* Response Waiting Time for ATR_RES PDU in ms * * RWT(ATR_RES) = RWT(nfcdep,activation) + dRWT(nfcdep) + dT(nfcdep,initiator) * * with: * RWT(nfcdep,activation) = 4096 * 2^12 / f(c) s * dRWT(nfcdep) = 16 / f(c) s * dT(nfcdep,initiator) = 100 ms * f(c) = 13560000 Hz */ #define DIGITAL_ATR_RES_RWT 1337 /* Response Waiting Time for other DEP PDUs in ms * * max_rwt = rwt + dRWT(nfcdep) + dT(nfcdep,initiator) * * with: * rwt = (256 * 16 / f(c)) * 2^wt s * dRWT(nfcdep) = 16 / f(c) s * dT(nfcdep,initiator) = 100 ms * f(c) = 13560000 Hz * 0 <= wt <= 14 (given by the target by the TO field of ATR_RES response) */ #define DIGITAL_NFC_DEP_IN_MAX_WT 14 #define DIGITAL_NFC_DEP_TG_MAX_WT 14 static const u16 digital_rwt_map[DIGITAL_NFC_DEP_IN_MAX_WT + 1] = { 100, 101, 101, 102, 105, 110, 119, 139, 177, 255, 409, 719, 1337, 2575, 5049, }; static u8 digital_payload_bits_to_size(u8 payload_bits) { if (payload_bits >= 
ARRAY_SIZE(digital_payload_bits_map)) return 0; return digital_payload_bits_map[payload_bits]; } static u8 digital_payload_size_to_bits(u8 payload_size) { int i; for (i = 0; i < ARRAY_SIZE(digital_payload_bits_map); i++) if (digital_payload_bits_map[i] == payload_size) return i; return 0xff; } static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev, struct sk_buff *skb) { skb_push(skb, sizeof(u8)); skb->data[0] = skb->len; if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A) *(u8 *)skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB; } static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev, struct sk_buff *skb) { u8 size; if (skb->len < 2) return -EIO; if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A) skb_pull(skb, sizeof(u8)); size = skb->data[0]; if (size != skb->len) return -EIO; skb_pull(skb, sizeof(u8)); return 0; } static struct sk_buff * digital_send_dep_data_prep(struct nfc_digital_dev *ddev, struct sk_buff *skb, struct digital_dep_req_res *dep_req_res, struct digital_data_exch *data_exch) { struct sk_buff *new_skb; if (skb->len > ddev->remote_payload_max) { dep_req_res->pfb |= DIGITAL_NFC_DEP_PFB_MI_BIT; new_skb = digital_skb_alloc(ddev, ddev->remote_payload_max); if (!new_skb) { kfree_skb(ddev->chaining_skb); ddev->chaining_skb = NULL; return ERR_PTR(-ENOMEM); } skb_put_data(new_skb, skb->data, ddev->remote_payload_max); skb_pull(skb, ddev->remote_payload_max); ddev->chaining_skb = skb; ddev->data_exch = data_exch; } else { ddev->chaining_skb = NULL; new_skb = skb; } return new_skb; } static struct sk_buff * digital_recv_dep_data_gather(struct nfc_digital_dev *ddev, u8 pfb, struct sk_buff *resp, int (*send_ack)(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch), struct digital_data_exch *data_exch) { struct sk_buff *new_skb; int rc; if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb) && (!ddev->chaining_skb)) { ddev->chaining_skb = nfc_alloc_recv_skb(8 * ddev->local_payload_max, GFP_KERNEL); if (!ddev->chaining_skb) { rc = -ENOMEM; goto error; } } if (ddev->chaining_skb) { if (resp->len > skb_tailroom(ddev->chaining_skb)) { new_skb = skb_copy_expand(ddev->chaining_skb, skb_headroom( ddev->chaining_skb), 8 * ddev->local_payload_max, GFP_KERNEL); if (!new_skb) { rc = -ENOMEM; goto error; } kfree_skb(ddev->chaining_skb); ddev->chaining_skb = new_skb; } skb_put_data(ddev->chaining_skb, resp->data, resp->len); kfree_skb(resp); resp = NULL; if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) { rc = send_ack(ddev, data_exch); if (rc) goto error; return NULL; } resp = ddev->chaining_skb; ddev->chaining_skb = NULL; } return resp; error: kfree_skb(resp); kfree_skb(ddev->chaining_skb); ddev->chaining_skb = NULL; return ERR_PTR(rc); } static void digital_in_recv_psl_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; struct digital_psl_res *psl_res; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { PROTOCOL_ERR("14.4.1.6"); goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } psl_res = (struct digital_psl_res *)resp->data; if ((resp->len != sizeof(*psl_res)) || (psl_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN) || (psl_res->cmd != DIGITAL_CMD_PSL_RES)) { rc = -EIO; goto exit; } rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_424F); if (rc) goto exit; rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCF_NFC_DEP); if (rc) goto exit; if 
(!DIGITAL_DRV_CAPS_IN_CRC(ddev) && (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)) { ddev->skb_add_crc = digital_skb_add_crc_f; ddev->skb_check_crc = digital_skb_check_crc_f; } ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_424F; nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE, NFC_RF_INITIATOR); ddev->curr_nfc_dep_pni = 0; exit: dev_kfree_skb(resp); if (rc) ddev->curr_protocol = 0; } static int digital_in_send_psl_req(struct nfc_digital_dev *ddev, struct nfc_target *target) { struct sk_buff *skb; struct digital_psl_req *psl_req; int rc; u8 payload_size, payload_bits; skb = digital_skb_alloc(ddev, sizeof(*psl_req)); if (!skb) return -ENOMEM; skb_put(skb, sizeof(*psl_req)); psl_req = (struct digital_psl_req *)skb->data; psl_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; psl_req->cmd = DIGITAL_CMD_PSL_REQ; psl_req->did = 0; psl_req->brs = (0x2 << 3) | 0x2; /* 424F both directions */ payload_size = min(ddev->local_payload_max, ddev->remote_payload_max); payload_bits = digital_payload_size_to_bits(payload_size); psl_req->fsl = DIGITAL_PAYLOAD_BITS_TO_FSL(payload_bits); ddev->local_payload_max = payload_size; ddev->remote_payload_max = payload_size; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt, digital_in_recv_psl_res, target); if (rc) kfree_skb(skb); return rc; } static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct nfc_target *target = arg; struct digital_atr_res *atr_res; u8 gb_len, payload_bits; u8 wt; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { PROTOCOL_ERR("14.4.1.6"); goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } if (resp->len < sizeof(struct digital_atr_res)) { rc = -EIO; goto exit; } gb_len = resp->len - sizeof(struct digital_atr_res); atr_res = (struct digital_atr_res *)resp->data; wt = DIGITAL_ATR_RES_TO_WT(atr_res->to); if (wt > DIGITAL_NFC_DEP_IN_MAX_WT) wt = DIGITAL_NFC_DEP_IN_MAX_WT; ddev->dep_rwt = digital_rwt_map[wt]; payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_res->pp); ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits); if (!ddev->remote_payload_max) { rc = -EINVAL; goto exit; } rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len); if (rc) goto exit; if ((ddev->protocols & NFC_PROTO_FELICA_MASK) && (ddev->curr_rf_tech != NFC_DIGITAL_RF_TECH_424F)) { rc = digital_in_send_psl_req(ddev, target); if (!rc) goto exit; } rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE, NFC_RF_INITIATOR); ddev->curr_nfc_dep_pni = 0; exit: dev_kfree_skb(resp); if (rc) ddev->curr_protocol = 0; } int digital_in_send_atr_req(struct nfc_digital_dev *ddev, struct nfc_target *target, __u8 comm_mode, __u8 *gb, size_t gb_len) { struct sk_buff *skb; struct digital_atr_req *atr_req; uint size; int rc; u8 payload_bits; size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len; if (size > DIGITAL_ATR_REQ_MAX_SIZE) { PROTOCOL_ERR("14.6.1.1"); return -EINVAL; } skb = digital_skb_alloc(ddev, size); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct digital_atr_req)); atr_req = (struct digital_atr_req *)skb->data; memset(atr_req, 0, sizeof(struct digital_atr_req)); atr_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; atr_req->cmd = DIGITAL_CMD_ATR_REQ; if (target->nfcid2_len) memcpy(atr_req->nfcid3, target->nfcid2, NFC_NFCID2_MAXSIZE); else get_random_bytes(atr_req->nfcid3, NFC_NFCID3_MAXSIZE); atr_req->did = 0; atr_req->bs = 0; 
atr_req->br = 0; ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX; payload_bits = digital_payload_size_to_bits(ddev->local_payload_max); atr_req->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits); if (gb_len) { atr_req->pp |= DIGITAL_GB_BIT; skb_put_data(skb, gb, gb_len); } digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, DIGITAL_ATR_RES_RWT, digital_in_recv_atr_res, target); if (rc) kfree_skb(skb); return rc; } static int digital_in_send_ack(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_req; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | ddev->curr_nfc_dep_pni; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); ddev->saved_skb = pskb_copy(skb, GFP_KERNEL); rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt, digital_in_recv_dep_res, data_exch); if (rc) { kfree_skb(skb); kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; } return rc; } static int digital_in_send_nack(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_req; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | DIGITAL_NFC_DEP_PFB_NACK_BIT | ddev->curr_nfc_dep_pni; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt, digital_in_recv_dep_res, data_exch); if (rc) kfree_skb(skb); return rc; } static int digital_in_send_atn(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_req; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, ddev->dep_rwt, digital_in_recv_dep_res, data_exch); if (rc) kfree_skb(skb); return rc; } static int digital_in_send_rtox(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch, u8 rtox) { struct digital_dep_req_res *dep_req; struct sk_buff *skb; int rc; u16 rwt_int; rwt_int = ddev->dep_rwt * rtox; if (rwt_int > digital_rwt_map[DIGITAL_NFC_DEP_IN_MAX_WT]) rwt_int = digital_rwt_map[DIGITAL_NFC_DEP_IN_MAX_WT]; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, rtox); skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU | DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT; digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_in_send_cmd(ddev, skb, rwt_int, digital_in_recv_dep_res, data_exch); if (rc) kfree_skb(skb); return rc; } static int digital_in_send_saved_skb(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { 
int rc; if (!ddev->saved_skb) return -EINVAL; skb_get(ddev->saved_skb); rc = digital_in_send_cmd(ddev, ddev->saved_skb, ddev->dep_rwt, digital_in_recv_dep_res, data_exch); if (rc) kfree_skb(ddev->saved_skb); return rc; } static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { struct digital_data_exch *data_exch = arg; struct digital_dep_req_res *dep_res; u8 pfb; uint size; int rc; u8 rtox; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; if ((rc == -EIO || (rc == -ETIMEDOUT && ddev->nack_count)) && (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) { ddev->atn_count = 0; rc = digital_in_send_nack(ddev, data_exch); if (rc) goto error; return; } else if ((rc == -ETIMEDOUT) && (ddev->atn_count++ < DIGITAL_NFC_DEP_N_RETRY_ATN)) { ddev->nack_count = 0; rc = digital_in_send_atn(ddev, data_exch); if (rc) goto error; return; } goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { if ((resp->len >= 4) && (ddev->nack_count++ < DIGITAL_NFC_DEP_N_RETRY_NACK)) { ddev->atn_count = 0; rc = digital_in_send_nack(ddev, data_exch); if (rc) goto error; kfree_skb(resp); return; } PROTOCOL_ERR("14.4.1.6"); goto error; } ddev->atn_count = 0; ddev->nack_count = 0; if (resp->len > ddev->local_payload_max) { rc = -EMSGSIZE; goto exit; } size = sizeof(struct digital_dep_req_res); dep_res = (struct digital_dep_req_res *)resp->data; if (resp->len < size || dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN || dep_res->cmd != DIGITAL_CMD_DEP_RES) { rc = -EIO; goto error; } pfb = dep_res->pfb; if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) { PROTOCOL_ERR("14.8.2.1"); rc = -EIO; goto error; } if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) { rc = -EIO; goto exit; } if (size > resp->len) { rc = -EIO; goto error; } skb_pull(resp, size); switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) { case DIGITAL_NFC_DEP_PFB_I_PDU: if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { PROTOCOL_ERR("14.12.3.3"); rc = -EIO; goto error; } ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; resp = digital_recv_dep_data_gather(ddev, pfb, resp, digital_in_send_ack, data_exch); if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto error; } /* If resp is NULL then we're still chaining so return and * wait for the next part of the PDU. Else, the PDU is * complete so pass it up. */ if (!resp) return; rc = 0; break; case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU: if (DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) { PROTOCOL_ERR("14.12.4.5"); rc = -EIO; goto exit; } if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { PROTOCOL_ERR("14.12.3.3"); rc = -EIO; goto exit; } ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); if (!ddev->chaining_skb) { PROTOCOL_ERR("14.12.4.3"); rc = -EIO; goto exit; } /* The initiator has received a valid ACK. Free the last sent * PDU and keep on sending chained skb. 
*/ kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; rc = digital_in_send_dep_req(ddev, NULL, ddev->chaining_skb, ddev->data_exch); if (rc) goto error; goto free_resp; case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU: if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { /* ATN */ rc = digital_in_send_saved_skb(ddev, data_exch); if (rc) goto error; goto free_resp; } if (ddev->atn_count || ddev->nack_count) { PROTOCOL_ERR("14.12.4.4"); rc = -EIO; goto error; } rtox = DIGITAL_NFC_DEP_RTOX_VALUE(resp->data[0]); if (!rtox || rtox > DIGITAL_NFC_DEP_RTOX_MAX) { PROTOCOL_ERR("14.8.4.1"); rc = -EIO; goto error; } rc = digital_in_send_rtox(ddev, data_exch, rtox); if (rc) goto error; goto free_resp; } exit: data_exch->cb(data_exch->cb_context, resp, rc); error: kfree(data_exch); kfree_skb(ddev->chaining_skb); ddev->chaining_skb = NULL; kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; if (rc) kfree_skb(resp); return; free_resp: dev_kfree_skb(resp); } int digital_in_send_dep_req(struct nfc_digital_dev *ddev, struct nfc_target *target, struct sk_buff *skb, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_req; struct sk_buff *chaining_skb, *tmp_skb; int rc; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_req = (struct digital_dep_req_res *)skb->data; dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT; dep_req->cmd = DIGITAL_CMD_DEP_REQ; dep_req->pfb = ddev->curr_nfc_dep_pni; ddev->atn_count = 0; ddev->nack_count = 0; chaining_skb = ddev->chaining_skb; tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_req, data_exch); if (IS_ERR(tmp_skb)) return PTR_ERR(tmp_skb); digital_skb_push_dep_sod(ddev, tmp_skb); ddev->skb_add_crc(tmp_skb); ddev->saved_skb = pskb_copy(tmp_skb, GFP_KERNEL); rc = digital_in_send_cmd(ddev, tmp_skb, ddev->dep_rwt, digital_in_recv_dep_res, data_exch); if (rc) { if (tmp_skb != skb) kfree_skb(tmp_skb); kfree_skb(chaining_skb); ddev->chaining_skb = NULL; kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; } return rc; } static void digital_tg_set_rf_tech(struct nfc_digital_dev *ddev, u8 rf_tech) { ddev->curr_rf_tech = rf_tech; ddev->skb_add_crc = digital_skb_add_crc_none; ddev->skb_check_crc = digital_skb_check_crc_none; if (DIGITAL_DRV_CAPS_TG_CRC(ddev)) return; switch (ddev->curr_rf_tech) { case NFC_DIGITAL_RF_TECH_106A: ddev->skb_add_crc = digital_skb_add_crc_a; ddev->skb_check_crc = digital_skb_check_crc_a; break; case NFC_DIGITAL_RF_TECH_212F: case NFC_DIGITAL_RF_TECH_424F: ddev->skb_add_crc = digital_skb_add_crc_f; ddev->skb_check_crc = digital_skb_check_crc_f; break; default: break; } } static int digital_tg_send_ack(struct nfc_digital_dev *ddev, struct digital_data_exch *data_exch) { struct digital_dep_req_res *dep_res; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_res = (struct digital_dep_req_res *)skb->data; dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; dep_res->cmd = DIGITAL_CMD_DEP_RES; dep_res->pfb = DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU | ddev->curr_nfc_dep_pni; if (ddev->did) { dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; skb_put_data(skb, &ddev->did, sizeof(ddev->did)); } ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); ddev->saved_skb = pskb_copy(skb, GFP_KERNEL); rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req, data_exch); if (rc) { kfree_skb(skb); kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; } return rc; } static int digital_tg_send_atn(struct 
nfc_digital_dev *ddev) { struct digital_dep_req_res *dep_res; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, 1); if (!skb) return -ENOMEM; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_res = (struct digital_dep_req_res *)skb->data; dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; dep_res->cmd = DIGITAL_CMD_DEP_RES; dep_res->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU; if (ddev->did) { dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; skb_put_data(skb, &ddev->did, sizeof(ddev->did)); } digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); rc = digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req, NULL); if (rc) kfree_skb(skb); return rc; } static int digital_tg_send_saved_skb(struct nfc_digital_dev *ddev) { int rc; if (!ddev->saved_skb) return -EINVAL; skb_get(ddev->saved_skb); rc = digital_tg_send_cmd(ddev, ddev->saved_skb, 1500, digital_tg_recv_dep_req, NULL); if (rc) kfree_skb(ddev->saved_skb); return rc; } static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; struct digital_dep_req_res *dep_req; u8 pfb; size_t size; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { PROTOCOL_ERR("14.4.1.6"); goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } if (resp->len > ddev->local_payload_max) { rc = -EMSGSIZE; goto exit; } size = sizeof(struct digital_dep_req_res); dep_req = (struct digital_dep_req_res *)resp->data; if (resp->len < size || dep_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT || dep_req->cmd != DIGITAL_CMD_DEP_REQ) { rc = -EIO; goto exit; } pfb = dep_req->pfb; if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb)) { if (ddev->did && (ddev->did == resp->data[3])) { size++; } else { rc = -EIO; goto exit; } } else if (ddev->did) { rc = -EIO; goto exit; } if (DIGITAL_NFC_DEP_NAD_BIT_SET(pfb)) { rc = -EIO; goto exit; } if (size > resp->len) { rc = -EIO; goto exit; } skb_pull(resp, size); switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) { case DIGITAL_NFC_DEP_PFB_I_PDU: pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n"); if (ddev->atn_count) { /* The target has received (and replied to) at least one * ATN DEP_REQ. */ ddev->atn_count = 0; /* pni of resp PDU equal to the target current pni - 1 * means resp is the previous DEP_REQ PDU received from * the initiator so the target replies with saved_skb * which is the previous DEP_RES saved in * digital_tg_send_dep_res(). */ if (DIGITAL_NFC_DEP_PFB_PNI(pfb) == DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni - 1)) { rc = digital_tg_send_saved_skb(ddev); if (rc) goto exit; goto free_resp; } /* atn_count > 0 and PDU pni != curr_nfc_dep_pni - 1 * means the target probably did not received the last * DEP_REQ PDU sent by the initiator. The target * fallbacks to normal processing then. */ } if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { PROTOCOL_ERR("14.12.3.4"); rc = -EIO; goto exit; } kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; resp = digital_recv_dep_data_gather(ddev, pfb, resp, digital_tg_send_ack, NULL); if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } /* If resp is NULL then we're still chaining so return and * wait for the next part of the PDU. Else, the PDU is * complete so pass it up. 
*/ if (!resp) return; rc = 0; break; case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU: if (DIGITAL_NFC_DEP_NACK_BIT_SET(pfb)) { /* NACK */ if (DIGITAL_NFC_DEP_PFB_PNI(pfb + 1) != ddev->curr_nfc_dep_pni) { rc = -EIO; goto exit; } ddev->atn_count = 0; rc = digital_tg_send_saved_skb(ddev); if (rc) goto exit; goto free_resp; } /* ACK */ if (ddev->atn_count) { /* The target has previously received one or more ATN * PDUs. */ ddev->atn_count = 0; /* If the ACK PNI is equal to the target PNI - 1 means * that the initiator did not receive the previous PDU * sent by the target so re-send it. */ if (DIGITAL_NFC_DEP_PFB_PNI(pfb + 1) == ddev->curr_nfc_dep_pni) { rc = digital_tg_send_saved_skb(ddev); if (rc) goto exit; goto free_resp; } /* Otherwise, the target did not receive the previous * ACK PDU from the initiator. Fallback to normal * processing of chained PDU then. */ } /* Keep on sending chained PDU */ if (!ddev->chaining_skb || DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) { rc = -EIO; goto exit; } kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; rc = digital_tg_send_dep_res(ddev, ddev->chaining_skb); if (rc) goto exit; goto free_resp; case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU: if (DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) { rc = -EINVAL; goto exit; } rc = digital_tg_send_atn(ddev); if (rc) goto exit; ddev->atn_count++; goto free_resp; } rc = nfc_tm_data_received(ddev->nfc_dev, resp); if (rc) resp = NULL; exit: kfree_skb(ddev->chaining_skb); ddev->chaining_skb = NULL; ddev->atn_count = 0; kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; if (rc) kfree_skb(resp); return; free_resp: dev_kfree_skb(resp); } int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb) { struct digital_dep_req_res *dep_res; struct sk_buff *chaining_skb, *tmp_skb; int rc; skb_push(skb, sizeof(struct digital_dep_req_res)); dep_res = (struct digital_dep_req_res *)skb->data; dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; dep_res->cmd = DIGITAL_CMD_DEP_RES; dep_res->pfb = ddev->curr_nfc_dep_pni; if (ddev->did) { dep_res->pfb |= DIGITAL_NFC_DEP_PFB_DID_BIT; skb_put_data(skb, &ddev->did, sizeof(ddev->did)); } ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1); chaining_skb = ddev->chaining_skb; tmp_skb = digital_send_dep_data_prep(ddev, skb, dep_res, NULL); if (IS_ERR(tmp_skb)) return PTR_ERR(tmp_skb); digital_skb_push_dep_sod(ddev, tmp_skb); ddev->skb_add_crc(tmp_skb); ddev->saved_skb = pskb_copy(tmp_skb, GFP_KERNEL); rc = digital_tg_send_cmd(ddev, tmp_skb, 1500, digital_tg_recv_dep_req, NULL); if (rc) { if (tmp_skb != skb) kfree_skb(tmp_skb); kfree_skb(chaining_skb); ddev->chaining_skb = NULL; kfree_skb(ddev->saved_skb); ddev->saved_skb = NULL; } return rc; } static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { u8 rf_tech = (unsigned long)arg; if (IS_ERR(resp)) return; digital_tg_set_rf_tech(ddev, rf_tech); digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech); digital_tg_listen(ddev, 1500, digital_tg_recv_dep_req, NULL); dev_kfree_skb(resp); } static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did, u8 rf_tech) { struct digital_psl_res *psl_res; struct sk_buff *skb; int rc; skb = digital_skb_alloc(ddev, sizeof(struct digital_psl_res)); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct digital_psl_res)); psl_res = (struct digital_psl_res *)skb->data; psl_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; psl_res->cmd = DIGITAL_CMD_PSL_RES; psl_res->did = did; digital_skb_push_dep_sod(ddev, skb); 
ddev->skb_add_crc(skb); ddev->curr_nfc_dep_pni = 0; rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete, (void *)(unsigned long)rf_tech); if (rc) kfree_skb(skb); return rc; } static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; struct digital_psl_req *psl_req; u8 rf_tech; u8 dsi, payload_size, payload_bits; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } rc = ddev->skb_check_crc(resp); if (rc) { PROTOCOL_ERR("14.4.1.6"); goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } psl_req = (struct digital_psl_req *)resp->data; if (resp->len != sizeof(struct digital_psl_req) || psl_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT || psl_req->cmd != DIGITAL_CMD_PSL_REQ) { rc = -EIO; goto exit; } dsi = (psl_req->brs >> 3) & 0x07; switch (dsi) { case 0: rf_tech = NFC_DIGITAL_RF_TECH_106A; break; case 1: rf_tech = NFC_DIGITAL_RF_TECH_212F; break; case 2: rf_tech = NFC_DIGITAL_RF_TECH_424F; break; default: pr_err("Unsupported dsi value %d\n", dsi); goto exit; } payload_bits = DIGITAL_PAYLOAD_FSL_TO_BITS(psl_req->fsl); payload_size = digital_payload_bits_to_size(payload_bits); if (!payload_size || (payload_size > min(ddev->local_payload_max, ddev->remote_payload_max))) { rc = -EINVAL; goto exit; } ddev->local_payload_max = payload_size; ddev->remote_payload_max = payload_size; rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech); exit: kfree_skb(resp); } static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int offset; if (IS_ERR(resp)) { digital_poll_next_tech(ddev); return; } offset = 2; if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) offset++; ddev->atn_count = 0; if (resp->data[offset] == DIGITAL_CMD_PSL_REQ) digital_tg_recv_psl_req(ddev, arg, resp); else digital_tg_recv_dep_req(ddev, arg, resp); } static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev, struct digital_atr_req *atr_req) { struct digital_atr_res *atr_res; struct sk_buff *skb; u8 *gb, payload_bits; size_t gb_len; int rc; gb = nfc_get_local_general_bytes(ddev->nfc_dev, &gb_len); if (!gb) gb_len = 0; skb = digital_skb_alloc(ddev, sizeof(struct digital_atr_res) + gb_len); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct digital_atr_res)); atr_res = (struct digital_atr_res *)skb->data; memset(atr_res, 0, sizeof(struct digital_atr_res)); atr_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN; atr_res->cmd = DIGITAL_CMD_ATR_RES; memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3)); atr_res->to = DIGITAL_NFC_DEP_TG_MAX_WT; ddev->local_payload_max = DIGITAL_PAYLOAD_SIZE_MAX; payload_bits = digital_payload_size_to_bits(ddev->local_payload_max); atr_res->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits); if (gb_len) { skb_put(skb, gb_len); atr_res->pp |= DIGITAL_GB_BIT; memcpy(atr_res->gb, gb, gb_len); } digital_skb_push_dep_sod(ddev, skb); ddev->skb_add_crc(skb); ddev->curr_nfc_dep_pni = 0; rc = digital_tg_send_cmd(ddev, skb, 999, digital_tg_send_atr_res_complete, NULL); if (rc) kfree_skb(skb); return rc; } void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg, struct sk_buff *resp) { int rc; struct digital_atr_req *atr_req; size_t gb_len, min_size; u8 poll_tech_count, payload_bits; if (IS_ERR(resp)) { rc = PTR_ERR(resp); resp = NULL; goto exit; } if (!resp->len) { rc = -EIO; goto exit; } if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) { min_size = DIGITAL_ATR_REQ_MIN_SIZE + 2; digital_tg_set_rf_tech(ddev, 
NFC_DIGITAL_RF_TECH_106A); } else { min_size = DIGITAL_ATR_REQ_MIN_SIZE + 1; digital_tg_set_rf_tech(ddev, NFC_DIGITAL_RF_TECH_212F); } if (resp->len < min_size) { rc = -EIO; goto exit; } ddev->curr_protocol = NFC_PROTO_NFC_DEP_MASK; rc = ddev->skb_check_crc(resp); if (rc) { PROTOCOL_ERR("14.4.1.6"); goto exit; } rc = digital_skb_pull_dep_sod(ddev, resp); if (rc) { PROTOCOL_ERR("14.4.1.2"); goto exit; } atr_req = (struct digital_atr_req *)resp->data; if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT || atr_req->cmd != DIGITAL_CMD_ATR_REQ || atr_req->did > DIGITAL_DID_MAX) { rc = -EINVAL; goto exit; } payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_req->pp); ddev->remote_payload_max = digital_payload_bits_to_size(payload_bits); if (!ddev->remote_payload_max) { rc = -EINVAL; goto exit; } ddev->did = atr_req->did; rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED); if (rc) goto exit; rc = digital_tg_send_atr_res(ddev, atr_req); if (rc) goto exit; gb_len = resp->len - sizeof(struct digital_atr_req); poll_tech_count = ddev->poll_tech_count; ddev->poll_tech_count = 0; rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, atr_req->gb, gb_len); if (rc) { ddev->poll_tech_count = poll_tech_count; goto exit; } rc = 0; exit: if (rc) digital_poll_next_tech(ddev); dev_kfree_skb(resp); }
linux-master
net/nfc/digital_dep.c
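Editorial aside, not part of the kernel file above: the ACK/NACK and ATN recovery branches in digital_tg_recv_dep_req() hinge on modular packet-number (PNI) arithmetic. The sketch below shows that arithmetic in isolation; it assumes the PNI is the 2-bit NFC-DEP packet number, i.e. that DIGITAL_NFC_DEP_PFB_PNI() masks with 0x03, and the macro is re-declared here under a stand-in name purely for the demonstration.

#include <assert.h>

#define PFB_PNI(x) ((x) & 0x03)	/* illustrative stand-in for DIGITAL_NFC_DEP_PFB_PNI */

int main(void)
{
	unsigned char curr, nack_pni;

	/* Incrementing the packet number always wraps modulo 4, as in
	 * curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(curr_nfc_dep_pni + 1). */
	curr = PFB_PNI(3 + 1);
	assert(curr == 0);

	/* A received PFB whose PNI equals curr - 1 (mod 4) carries a
	 * retransmitted PDU, which is why the target answers it with
	 * saved_skb instead of treating it as new data. */
	assert(PFB_PNI(curr - 1) == 3);

	/* A NACK is only acted upon when its PNI + 1 equals curr, i.e. the
	 * initiator is NACKing the PDU the target sent last. */
	nack_pni = 3;
	assert(PFB_PNI(nack_pni + 1) == curr);

	return 0;
}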
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Intel Corporation. All rights reserved. * Copyright (C) 2014 Marvell International Ltd. */ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/nfc.h> #include "nfc.h" #include "llcp.h" static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; static LIST_HEAD(llcp_devices); /* Protects llcp_devices list */ static DEFINE_SPINLOCK(llcp_devices_lock); static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb); void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock) { sock->remote_rw = LLCP_DEFAULT_RW; sock->remote_miu = LLCP_MAX_MIU + 1; } static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) { struct nfc_llcp_local *local = sock->local; struct sk_buff *s, *tmp; skb_queue_purge(&sock->tx_queue); skb_queue_purge(&sock->tx_pending_queue); if (local == NULL) return; /* Search for local pending SKBs that are related to this socket */ skb_queue_walk_safe(&local->tx_queue, s, tmp) { if (s->sk != &sock->sk) continue; skb_unlink(s, &local->tx_queue); kfree_skb(s); } } static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device, int err) { struct sock *sk; struct hlist_node *tmp; struct nfc_llcp_sock *llcp_sock; skb_queue_purge(&local->tx_queue); write_lock(&local->sockets.lock); sk_for_each_safe(sk, tmp, &local->sockets.head) { llcp_sock = nfc_llcp_sock(sk); bh_lock_sock(sk); nfc_llcp_socket_purge(llcp_sock); if (sk->sk_state == LLCP_CONNECTED) nfc_put_device(llcp_sock->dev); if (sk->sk_state == LLCP_LISTEN) { struct nfc_llcp_sock *lsk, *n; struct sock *accept_sk; list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, accept_queue) { accept_sk = &lsk->sk; bh_lock_sock(accept_sk); nfc_llcp_accept_unlink(accept_sk); if (err) accept_sk->sk_err = err; accept_sk->sk_state = LLCP_CLOSED; accept_sk->sk_state_change(sk); bh_unlock_sock(accept_sk); } } if (err) sk->sk_err = err; sk->sk_state = LLCP_CLOSED; sk->sk_state_change(sk); bh_unlock_sock(sk); sk_del_node_init(sk); } write_unlock(&local->sockets.lock); /* If we still have a device, we keep the RAW sockets alive */ if (device == true) return; write_lock(&local->raw_sockets.lock); sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { llcp_sock = nfc_llcp_sock(sk); bh_lock_sock(sk); nfc_llcp_socket_purge(llcp_sock); if (err) sk->sk_err = err; sk->sk_state = LLCP_CLOSED; sk->sk_state_change(sk); bh_unlock_sock(sk); sk_del_node_init(sk); } write_unlock(&local->raw_sockets.lock); } static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) { kref_get(&local->ref); return local; } static void local_cleanup(struct nfc_llcp_local *local) { nfc_llcp_socket_release(local, false, ENXIO); del_timer_sync(&local->link_timer); skb_queue_purge(&local->tx_queue); cancel_work_sync(&local->tx_work); cancel_work_sync(&local->rx_work); cancel_work_sync(&local->timeout_work); kfree_skb(local->rx_pending); local->rx_pending = NULL; del_timer_sync(&local->sdreq_timer); cancel_work_sync(&local->sdreq_timeout_work); nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs); } static void local_release(struct kref *ref) { struct nfc_llcp_local *local; local = container_of(ref, struct 
nfc_llcp_local, ref); local_cleanup(local); kfree(local); } int nfc_llcp_local_put(struct nfc_llcp_local *local) { if (local == NULL) return 0; return kref_put(&local->ref, local_release); } static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local, u8 ssap, u8 dsap) { struct sock *sk; struct nfc_llcp_sock *llcp_sock, *tmp_sock; pr_debug("ssap dsap %d %d\n", ssap, dsap); if (ssap == 0 && dsap == 0) return NULL; read_lock(&local->sockets.lock); llcp_sock = NULL; sk_for_each(sk, &local->sockets.head) { tmp_sock = nfc_llcp_sock(sk); if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) { llcp_sock = tmp_sock; break; } } read_unlock(&local->sockets.lock); if (llcp_sock == NULL) return NULL; sock_hold(&llcp_sock->sk); return llcp_sock; } static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock) { sock_put(&sock->sk); } static void nfc_llcp_timeout_work(struct work_struct *work) { struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, timeout_work); nfc_dep_link_down(local->dev); } static void nfc_llcp_symm_timer(struct timer_list *t) { struct nfc_llcp_local *local = from_timer(local, t, link_timer); pr_err("SYMM timeout\n"); schedule_work(&local->timeout_work); } static void nfc_llcp_sdreq_timeout_work(struct work_struct *work) { unsigned long time; HLIST_HEAD(nl_sdres_list); struct hlist_node *n; struct nfc_llcp_sdp_tlv *sdp; struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, sdreq_timeout_work); mutex_lock(&local->sdreq_lock); time = jiffies - msecs_to_jiffies(3 * local->remote_lto); hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) { if (time_after(sdp->time, time)) continue; sdp->sap = LLCP_SDP_UNBOUND; hlist_del(&sdp->node); hlist_add_head(&sdp->node, &nl_sdres_list); } if (!hlist_empty(&local->pending_sdreqs)) mod_timer(&local->sdreq_timer, jiffies + msecs_to_jiffies(3 * local->remote_lto)); mutex_unlock(&local->sdreq_lock); if (!hlist_empty(&nl_sdres_list)) nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list); } static void nfc_llcp_sdreq_timer(struct timer_list *t) { struct nfc_llcp_local *local = from_timer(local, t, sdreq_timer); schedule_work(&local->sdreq_timeout_work); } struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) { struct nfc_llcp_local *local; struct nfc_llcp_local *res = NULL; spin_lock(&llcp_devices_lock); list_for_each_entry(local, &llcp_devices, list) if (local->dev == dev) { res = nfc_llcp_local_get(local); break; } spin_unlock(&llcp_devices_lock); return res; } static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev) { struct nfc_llcp_local *local, *tmp; spin_lock(&llcp_devices_lock); list_for_each_entry_safe(local, tmp, &llcp_devices, list) if (local->dev == dev) { list_del(&local->list); spin_unlock(&llcp_devices_lock); return local; } spin_unlock(&llcp_devices_lock); pr_warn("Shutting down device not found\n"); return NULL; } static char *wks[] = { NULL, NULL, /* SDP */ "urn:nfc:sn:ip", "urn:nfc:sn:obex", "urn:nfc:sn:snep", }; static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len) { int sap, num_wks; pr_debug("%s\n", service_name); if (service_name == NULL) return -EINVAL; num_wks = ARRAY_SIZE(wks); for (sap = 0; sap < num_wks; sap++) { if (wks[sap] == NULL) continue; if (strncmp(wks[sap], service_name, service_name_len) == 0) return sap; } return -EINVAL; } static struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local, const u8 *sn, size_t sn_len) { struct sock *sk; struct nfc_llcp_sock *llcp_sock, *tmp_sock; pr_debug("sn 
%zd %p\n", sn_len, sn); if (sn == NULL || sn_len == 0) return NULL; read_lock(&local->sockets.lock); llcp_sock = NULL; sk_for_each(sk, &local->sockets.head) { tmp_sock = nfc_llcp_sock(sk); pr_debug("llcp sock %p\n", tmp_sock); if (tmp_sock->sk.sk_type == SOCK_STREAM && tmp_sock->sk.sk_state != LLCP_LISTEN) continue; if (tmp_sock->sk.sk_type == SOCK_DGRAM && tmp_sock->sk.sk_state != LLCP_BOUND) continue; if (tmp_sock->service_name == NULL || tmp_sock->service_name_len == 0) continue; if (tmp_sock->service_name_len != sn_len) continue; if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) { llcp_sock = tmp_sock; break; } } read_unlock(&local->sockets.lock); pr_debug("Found llcp sock %p\n", llcp_sock); return llcp_sock; } u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, struct nfc_llcp_sock *sock) { mutex_lock(&local->sdp_lock); if (sock->service_name != NULL && sock->service_name_len > 0) { int ssap = nfc_llcp_wks_sap(sock->service_name, sock->service_name_len); if (ssap > 0) { pr_debug("WKS %d\n", ssap); /* This is a WKS, let's check if it's free */ if (test_bit(ssap, &local->local_wks)) { mutex_unlock(&local->sdp_lock); return LLCP_SAP_MAX; } set_bit(ssap, &local->local_wks); mutex_unlock(&local->sdp_lock); return ssap; } /* * Check if there already is a non WKS socket bound * to this service name. */ if (nfc_llcp_sock_from_sn(local, sock->service_name, sock->service_name_len) != NULL) { mutex_unlock(&local->sdp_lock); return LLCP_SAP_MAX; } mutex_unlock(&local->sdp_lock); return LLCP_SDP_UNBOUND; } else if (sock->ssap != 0 && sock->ssap < LLCP_WKS_NUM_SAP) { if (!test_bit(sock->ssap, &local->local_wks)) { set_bit(sock->ssap, &local->local_wks); mutex_unlock(&local->sdp_lock); return sock->ssap; } } mutex_unlock(&local->sdp_lock); return LLCP_SAP_MAX; } u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local) { u8 local_ssap; mutex_lock(&local->sdp_lock); local_ssap = find_first_zero_bit(&local->local_sap, LLCP_LOCAL_NUM_SAP); if (local_ssap == LLCP_LOCAL_NUM_SAP) { mutex_unlock(&local->sdp_lock); return LLCP_SAP_MAX; } set_bit(local_ssap, &local->local_sap); mutex_unlock(&local->sdp_lock); return local_ssap + LLCP_LOCAL_SAP_OFFSET; } void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap) { u8 local_ssap; unsigned long *sdp; if (ssap < LLCP_WKS_NUM_SAP) { local_ssap = ssap; sdp = &local->local_wks; } else if (ssap < LLCP_LOCAL_NUM_SAP) { atomic_t *client_cnt; local_ssap = ssap - LLCP_WKS_NUM_SAP; sdp = &local->local_sdp; client_cnt = &local->local_sdp_cnt[local_ssap]; pr_debug("%d clients\n", atomic_read(client_cnt)); mutex_lock(&local->sdp_lock); if (atomic_dec_and_test(client_cnt)) { struct nfc_llcp_sock *l_sock; pr_debug("No more clients for SAP %d\n", ssap); clear_bit(local_ssap, sdp); /* Find the listening sock and set it back to UNBOUND */ l_sock = nfc_llcp_sock_get(local, ssap, LLCP_SAP_SDP); if (l_sock) { l_sock->ssap = LLCP_SDP_UNBOUND; nfc_llcp_sock_put(l_sock); } } mutex_unlock(&local->sdp_lock); return; } else if (ssap < LLCP_MAX_SAP) { local_ssap = ssap - LLCP_LOCAL_NUM_SAP; sdp = &local->local_sap; } else { return; } mutex_lock(&local->sdp_lock); clear_bit(local_ssap, sdp); mutex_unlock(&local->sdp_lock); } static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local) { u8 ssap; mutex_lock(&local->sdp_lock); ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP); if (ssap == LLCP_SDP_NUM_SAP) { mutex_unlock(&local->sdp_lock); return LLCP_SAP_MAX; } pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap); set_bit(ssap, &local->local_sdp); 
mutex_unlock(&local->sdp_lock); return LLCP_WKS_NUM_SAP + ssap; } static int nfc_llcp_build_gb(struct nfc_llcp_local *local) { u8 *gb_cur, version, version_length; u8 lto_length, wks_length, miux_length; const u8 *version_tlv = NULL, *lto_tlv = NULL, *wks_tlv = NULL, *miux_tlv = NULL; __be16 wks = cpu_to_be16(local->local_wks); u8 gb_len = 0; int ret = 0; version = LLCP_VERSION_11; version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version, 1, &version_length); if (!version_tlv) { ret = -ENOMEM; goto out; } gb_len += version_length; lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length); if (!lto_tlv) { ret = -ENOMEM; goto out; } gb_len += lto_length; pr_debug("Local wks 0x%lx\n", local->local_wks); wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length); if (!wks_tlv) { ret = -ENOMEM; goto out; } gb_len += wks_length; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0, &miux_length); if (!miux_tlv) { ret = -ENOMEM; goto out; } gb_len += miux_length; gb_len += ARRAY_SIZE(llcp_magic); if (gb_len > NFC_MAX_GT_LEN) { ret = -EINVAL; goto out; } gb_cur = local->gb; memcpy(gb_cur, llcp_magic, ARRAY_SIZE(llcp_magic)); gb_cur += ARRAY_SIZE(llcp_magic); memcpy(gb_cur, version_tlv, version_length); gb_cur += version_length; memcpy(gb_cur, lto_tlv, lto_length); gb_cur += lto_length; memcpy(gb_cur, wks_tlv, wks_length); gb_cur += wks_length; memcpy(gb_cur, miux_tlv, miux_length); gb_cur += miux_length; local->gb_len = gb_len; out: kfree(version_tlv); kfree(lto_tlv); kfree(wks_tlv); kfree(miux_tlv); return ret; } u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) { struct nfc_llcp_local *local; local = nfc_llcp_find_local(dev); if (local == NULL) { *general_bytes_len = 0; return NULL; } nfc_llcp_build_gb(local); *general_bytes_len = local->gb_len; nfc_llcp_local_put(local); return local->gb; } int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len) { struct nfc_llcp_local *local; int err; if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN) return -EINVAL; local = nfc_llcp_find_local(dev); if (local == NULL) { pr_err("No LLCP device\n"); return -ENODEV; } memset(local->remote_gb, 0, NFC_MAX_GT_LEN); memcpy(local->remote_gb, gb, gb_len); local->remote_gb_len = gb_len; if (memcmp(local->remote_gb, llcp_magic, 3)) { pr_err("MAC does not support LLCP\n"); err = -EINVAL; goto out; } err = nfc_llcp_parse_gb_tlv(local, &local->remote_gb[3], local->remote_gb_len - 3); out: nfc_llcp_local_put(local); return err; } static u8 nfc_llcp_dsap(const struct sk_buff *pdu) { return (pdu->data[0] & 0xfc) >> 2; } static u8 nfc_llcp_ptype(const struct sk_buff *pdu) { return ((pdu->data[0] & 0x03) << 2) | ((pdu->data[1] & 0xc0) >> 6); } static u8 nfc_llcp_ssap(const struct sk_buff *pdu) { return pdu->data[1] & 0x3f; } static u8 nfc_llcp_ns(const struct sk_buff *pdu) { return pdu->data[2] >> 4; } static u8 nfc_llcp_nr(const struct sk_buff *pdu) { return pdu->data[2] & 0xf; } static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) { pdu->data[2] = (sock->send_n << 4) | (sock->recv_n); sock->send_n = (sock->send_n + 1) % 16; sock->recv_ack_n = (sock->recv_n - 1) % 16; } void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local, struct sk_buff *skb, u8 direction) { struct sk_buff *skb_copy = NULL, *nskb; struct sock *sk; u8 *data; read_lock(&local->raw_sockets.lock); sk_for_each(sk, &local->raw_sockets.head) { if (sk->sk_state != LLCP_BOUND) continue; if (skb_copy == NULL) { skb_copy = __pskb_copy_fclone(skb, 
NFC_RAW_HEADER_SIZE, GFP_ATOMIC, true); if (skb_copy == NULL) continue; data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE); data[0] = local->dev ? local->dev->idx : 0xFF; data[1] = direction & 0x01; data[1] |= (RAW_PAYLOAD_LLCP << 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&local->raw_sockets.lock); kfree_skb(skb_copy); } static void nfc_llcp_tx_work(struct work_struct *work) { struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, tx_work); struct sk_buff *skb; struct sock *sk; struct nfc_llcp_sock *llcp_sock; skb = skb_dequeue(&local->tx_queue); if (skb != NULL) { sk = skb->sk; llcp_sock = nfc_llcp_sock(sk); if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) { kfree_skb(skb); nfc_llcp_send_symm(local->dev); } else if (llcp_sock && !llcp_sock->remote_ready) { skb_queue_head(&local->tx_queue, skb); nfc_llcp_send_symm(local->dev); } else { struct sk_buff *copy_skb = NULL; u8 ptype = nfc_llcp_ptype(skb); int ret; pr_debug("Sending pending skb\n"); print_hex_dump_debug("LLCP Tx: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, true); if (ptype == LLCP_PDU_I) copy_skb = skb_copy(skb, GFP_ATOMIC); __net_timestamp(skb); nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); ret = nfc_data_exchange(local->dev, local->target_idx, skb, nfc_llcp_recv, local); if (ret) { kfree_skb(copy_skb); goto out; } if (ptype == LLCP_PDU_I && copy_skb) skb_queue_tail(&llcp_sock->tx_pending_queue, copy_skb); } } else { nfc_llcp_send_symm(local->dev); } out: mod_timer(&local->link_timer, jiffies + msecs_to_jiffies(2 * local->remote_lto)); } static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local, u8 ssap) { struct sock *sk; struct nfc_llcp_sock *llcp_sock; read_lock(&local->connecting_sockets.lock); sk_for_each(sk, &local->connecting_sockets.head) { llcp_sock = nfc_llcp_sock(sk); if (llcp_sock->ssap == ssap) { sock_hold(&llcp_sock->sk); goto out; } } llcp_sock = NULL; out: read_unlock(&local->connecting_sockets.lock); return llcp_sock; } static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local, const u8 *sn, size_t sn_len) { struct nfc_llcp_sock *llcp_sock; llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len); if (llcp_sock == NULL) return NULL; sock_hold(&llcp_sock->sk); return llcp_sock; } static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len) { u8 type, length; const u8 *tlv = &skb->data[2]; size_t tlv_array_len = skb->len - LLCP_HEADER_SIZE, offset = 0; while (offset < tlv_array_len) { type = tlv[0]; length = tlv[1]; pr_debug("type 0x%x length %d\n", type, length); if (type == LLCP_TLV_SN) { *sn_len = length; return &tlv[2]; } offset += length + 2; tlv += length + 2; } return NULL; } static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; struct nfc_llcp_ui_cb *ui_cb; u8 dsap, ssap; dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); ui_cb = nfc_llcp_ui_skb_cb(skb); ui_cb->dsap = dsap; ui_cb->ssap = ssap; pr_debug("%d %d\n", dsap, ssap); /* We're looking for a bound socket, not a client one */ llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM) return; /* There is no sequence with UI frames */ skb_pull(skb, LLCP_HEADER_SIZE); if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) { /* * UI frames will be freed from the socket layer, so we * need to keep them alive until someone receives them. 
*/ skb_get(skb); } else { pr_err("Receive queue is full\n"); } nfc_llcp_sock_put(llcp_sock); } static void nfc_llcp_recv_connect(struct nfc_llcp_local *local, const struct sk_buff *skb) { struct sock *new_sk, *parent; struct nfc_llcp_sock *sock, *new_sock; u8 dsap, ssap, reason; dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); pr_debug("%d %d\n", dsap, ssap); if (dsap != LLCP_SAP_SDP) { sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP); if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) { reason = LLCP_DM_NOBOUND; goto fail; } } else { const u8 *sn; size_t sn_len; sn = nfc_llcp_connect_sn(skb, &sn_len); if (sn == NULL) { reason = LLCP_DM_NOBOUND; goto fail; } pr_debug("Service name length %zu\n", sn_len); sock = nfc_llcp_sock_get_sn(local, sn, sn_len); if (sock == NULL) { reason = LLCP_DM_NOBOUND; goto fail; } } lock_sock(&sock->sk); parent = &sock->sk; if (sk_acceptq_is_full(parent)) { reason = LLCP_DM_REJ; release_sock(&sock->sk); sock_put(&sock->sk); goto fail; } if (sock->ssap == LLCP_SDP_UNBOUND) { u8 ssap = nfc_llcp_reserve_sdp_ssap(local); pr_debug("First client, reserving %d\n", ssap); if (ssap == LLCP_SAP_MAX) { reason = LLCP_DM_REJ; release_sock(&sock->sk); sock_put(&sock->sk); goto fail; } sock->ssap = ssap; } new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC, 0); if (new_sk == NULL) { reason = LLCP_DM_REJ; release_sock(&sock->sk); sock_put(&sock->sk); goto fail; } new_sock = nfc_llcp_sock(new_sk); new_sock->dev = local->dev; new_sock->local = nfc_llcp_local_get(local); new_sock->rw = sock->rw; new_sock->miux = sock->miux; new_sock->nfc_protocol = sock->nfc_protocol; new_sock->dsap = ssap; new_sock->target_idx = local->target_idx; new_sock->parent = parent; new_sock->ssap = sock->ssap; if (sock->ssap < LLCP_LOCAL_NUM_SAP && sock->ssap >= LLCP_WKS_NUM_SAP) { atomic_t *client_count; pr_debug("reserved_ssap %d for %p\n", sock->ssap, new_sock); client_count = &local->local_sdp_cnt[sock->ssap - LLCP_WKS_NUM_SAP]; atomic_inc(client_count); new_sock->reserved_ssap = sock->ssap; } nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE], skb->len - LLCP_HEADER_SIZE); pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk); nfc_llcp_sock_link(&local->sockets, new_sk); nfc_llcp_accept_enqueue(&sock->sk, new_sk); nfc_get_device(local->dev->idx); new_sk->sk_state = LLCP_CONNECTED; /* Wake the listening processes */ parent->sk_data_ready(parent); /* Send CC */ nfc_llcp_send_cc(new_sock); release_sock(&sock->sk); sock_put(&sock->sk); return; fail: /* Send DM */ nfc_llcp_send_dm(local, dsap, ssap, reason); } int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock) { int nr_frames = 0; struct nfc_llcp_local *local = sock->local; pr_debug("Remote ready %d tx queue len %d remote rw %d", sock->remote_ready, skb_queue_len(&sock->tx_pending_queue), sock->remote_rw); /* Try to queue some I frames for transmission */ while (sock->remote_ready && skb_queue_len(&sock->tx_pending_queue) < sock->remote_rw) { struct sk_buff *pdu; pdu = skb_dequeue(&sock->tx_queue); if (pdu == NULL) break; /* Update N(S)/N(R) */ nfc_llcp_set_nrns(sock, pdu); skb_queue_tail(&local->tx_queue, pdu); nr_frames++; } return nr_frames; } static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; struct sock *sk; u8 dsap, ssap, ptype, ns, nr; ptype = nfc_llcp_ptype(skb); dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); ns = nfc_llcp_ns(skb); nr = nfc_llcp_nr(skb); pr_debug("%d %d R %d S %d\n", dsap, ssap, nr, ns); llcp_sock = 
nfc_llcp_sock_get(local, dsap, ssap); if (llcp_sock == NULL) { nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); return; } sk = &llcp_sock->sk; lock_sock(sk); if (sk->sk_state == LLCP_CLOSED) { release_sock(sk); nfc_llcp_sock_put(llcp_sock); } /* Pass the payload upstream */ if (ptype == LLCP_PDU_I) { pr_debug("I frame, queueing on %p\n", &llcp_sock->sk); if (ns == llcp_sock->recv_n) llcp_sock->recv_n = (llcp_sock->recv_n + 1) % 16; else pr_err("Received out of sequence I PDU\n"); skb_pull(skb, LLCP_HEADER_SIZE + LLCP_SEQUENCE_SIZE); if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) { /* * I frames will be freed from the socket layer, so we * need to keep them alive until someone receives them. */ skb_get(skb); } else { pr_err("Receive queue is full\n"); } } /* Remove skbs from the pending queue */ if (llcp_sock->send_ack_n != nr) { struct sk_buff *s, *tmp; u8 n; llcp_sock->send_ack_n = nr; /* Remove and free all skbs until ns == nr */ skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) { n = nfc_llcp_ns(s); skb_unlink(s, &llcp_sock->tx_pending_queue); kfree_skb(s); if (n == nr) break; } /* Re-queue the remaining skbs for transmission */ skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) { skb_unlink(s, &llcp_sock->tx_pending_queue); skb_queue_head(&local->tx_queue, s); } } if (ptype == LLCP_PDU_RR) llcp_sock->remote_ready = true; else if (ptype == LLCP_PDU_RNR) llcp_sock->remote_ready = false; if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I) nfc_llcp_send_rr(llcp_sock); release_sock(sk); nfc_llcp_sock_put(llcp_sock); } static void nfc_llcp_recv_disc(struct nfc_llcp_local *local, const struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; struct sock *sk; u8 dsap, ssap; dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); if ((dsap == 0) && (ssap == 0)) { pr_debug("Connection termination"); nfc_dep_link_down(local->dev); return; } llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); if (llcp_sock == NULL) { nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); return; } sk = &llcp_sock->sk; lock_sock(sk); nfc_llcp_socket_purge(llcp_sock); if (sk->sk_state == LLCP_CLOSED) { release_sock(sk); nfc_llcp_sock_put(llcp_sock); } if (sk->sk_state == LLCP_CONNECTED) { nfc_put_device(local->dev); sk->sk_state = LLCP_CLOSED; sk->sk_state_change(sk); } nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_DISC); release_sock(sk); nfc_llcp_sock_put(llcp_sock); } static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, const struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; struct sock *sk; u8 dsap, ssap; dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); llcp_sock = nfc_llcp_connecting_sock_get(local, dsap); if (llcp_sock == NULL) { pr_err("Invalid CC\n"); nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN); return; } sk = &llcp_sock->sk; /* Unlink from connecting and link to the client array */ nfc_llcp_sock_unlink(&local->connecting_sockets, sk); nfc_llcp_sock_link(&local->sockets, sk); llcp_sock->dsap = ssap; nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE], skb->len - LLCP_HEADER_SIZE); sk->sk_state = LLCP_CONNECTED; sk->sk_state_change(sk); nfc_llcp_sock_put(llcp_sock); } static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, const struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; struct sock *sk; u8 dsap, ssap, reason; dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); reason = skb->data[2]; pr_debug("%d %d reason %d\n", ssap, dsap, reason); switch (reason) { case LLCP_DM_NOBOUND: case LLCP_DM_REJ: llcp_sock = 
nfc_llcp_connecting_sock_get(local, dsap); break; default: llcp_sock = nfc_llcp_sock_get(local, dsap, ssap); break; } if (llcp_sock == NULL) { pr_debug("Already closed\n"); return; } sk = &llcp_sock->sk; sk->sk_err = ENXIO; sk->sk_state = LLCP_CLOSED; sk->sk_state_change(sk); nfc_llcp_sock_put(llcp_sock); } static void nfc_llcp_recv_snl(struct nfc_llcp_local *local, const struct sk_buff *skb) { struct nfc_llcp_sock *llcp_sock; u8 dsap, ssap, type, length, tid, sap; const u8 *tlv; u16 tlv_len, offset; const char *service_name; size_t service_name_len; struct nfc_llcp_sdp_tlv *sdp; HLIST_HEAD(llc_sdres_list); size_t sdres_tlvs_len; HLIST_HEAD(nl_sdres_list); dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); pr_debug("%d %d\n", dsap, ssap); if (dsap != LLCP_SAP_SDP || ssap != LLCP_SAP_SDP) { pr_err("Wrong SNL SAP\n"); return; } tlv = &skb->data[LLCP_HEADER_SIZE]; tlv_len = skb->len - LLCP_HEADER_SIZE; offset = 0; sdres_tlvs_len = 0; while (offset < tlv_len) { type = tlv[0]; length = tlv[1]; switch (type) { case LLCP_TLV_SDREQ: tid = tlv[2]; service_name = (char *) &tlv[3]; service_name_len = length - 1; pr_debug("Looking for %.16s\n", service_name); if (service_name_len == strlen("urn:nfc:sn:sdp") && !strncmp(service_name, "urn:nfc:sn:sdp", service_name_len)) { sap = 1; goto add_snl; } llcp_sock = nfc_llcp_sock_from_sn(local, service_name, service_name_len); if (!llcp_sock) { sap = 0; goto add_snl; } /* * We found a socket but its ssap has not been reserved * yet. We need to assign it for good and send a reply. * The ssap will be freed when the socket is closed. */ if (llcp_sock->ssap == LLCP_SDP_UNBOUND) { atomic_t *client_count; sap = nfc_llcp_reserve_sdp_ssap(local); pr_debug("Reserving %d\n", sap); if (sap == LLCP_SAP_MAX) { sap = 0; goto add_snl; } client_count = &local->local_sdp_cnt[sap - LLCP_WKS_NUM_SAP]; atomic_inc(client_count); llcp_sock->ssap = sap; llcp_sock->reserved_ssap = sap; } else { sap = llcp_sock->ssap; } pr_debug("%p %d\n", llcp_sock, sap); add_snl: sdp = nfc_llcp_build_sdres_tlv(tid, sap); if (sdp == NULL) goto exit; sdres_tlvs_len += sdp->tlv_len; hlist_add_head(&sdp->node, &llc_sdres_list); break; case LLCP_TLV_SDRES: mutex_lock(&local->sdreq_lock); pr_debug("LLCP_TLV_SDRES: searching tid %d\n", tlv[2]); hlist_for_each_entry(sdp, &local->pending_sdreqs, node) { if (sdp->tid != tlv[2]) continue; sdp->sap = tlv[3]; pr_debug("Found: uri=%s, sap=%d\n", sdp->uri, sdp->sap); hlist_del(&sdp->node); hlist_add_head(&sdp->node, &nl_sdres_list); break; } mutex_unlock(&local->sdreq_lock); break; default: pr_err("Invalid SNL tlv value 0x%x\n", type); break; } offset += length + 2; tlv += length + 2; } exit: if (!hlist_empty(&nl_sdres_list)) nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list); if (!hlist_empty(&llc_sdres_list)) nfc_llcp_send_snl_sdres(local, &llc_sdres_list, sdres_tlvs_len); } static void nfc_llcp_recv_agf(struct nfc_llcp_local *local, struct sk_buff *skb) { u8 ptype; u16 pdu_len; struct sk_buff *new_skb; if (skb->len <= LLCP_HEADER_SIZE) { pr_err("Malformed AGF PDU\n"); return; } skb_pull(skb, LLCP_HEADER_SIZE); while (skb->len > LLCP_AGF_PDU_HEADER_SIZE) { pdu_len = skb->data[0] << 8 | skb->data[1]; skb_pull(skb, LLCP_AGF_PDU_HEADER_SIZE); if (pdu_len < LLCP_HEADER_SIZE || pdu_len > skb->len) { pr_err("Malformed AGF PDU\n"); return; } ptype = nfc_llcp_ptype(skb); if (ptype == LLCP_PDU_SYMM || ptype == LLCP_PDU_AGF) goto next; new_skb = nfc_alloc_recv_skb(pdu_len, GFP_KERNEL); if (new_skb == NULL) { pr_err("Could not allocate PDU\n"); return; } 
skb_put_data(new_skb, skb->data, pdu_len); nfc_llcp_rx_skb(local, new_skb); kfree_skb(new_skb); next: skb_pull(skb, pdu_len); } } static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb) { u8 dsap, ssap, ptype; ptype = nfc_llcp_ptype(skb); dsap = nfc_llcp_dsap(skb); ssap = nfc_llcp_ssap(skb); pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); if (ptype != LLCP_PDU_SYMM) print_hex_dump_debug("LLCP Rx: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, true); switch (ptype) { case LLCP_PDU_SYMM: pr_debug("SYMM\n"); break; case LLCP_PDU_UI: pr_debug("UI\n"); nfc_llcp_recv_ui(local, skb); break; case LLCP_PDU_CONNECT: pr_debug("CONNECT\n"); nfc_llcp_recv_connect(local, skb); break; case LLCP_PDU_DISC: pr_debug("DISC\n"); nfc_llcp_recv_disc(local, skb); break; case LLCP_PDU_CC: pr_debug("CC\n"); nfc_llcp_recv_cc(local, skb); break; case LLCP_PDU_DM: pr_debug("DM\n"); nfc_llcp_recv_dm(local, skb); break; case LLCP_PDU_SNL: pr_debug("SNL\n"); nfc_llcp_recv_snl(local, skb); break; case LLCP_PDU_I: case LLCP_PDU_RR: case LLCP_PDU_RNR: pr_debug("I frame\n"); nfc_llcp_recv_hdlc(local, skb); break; case LLCP_PDU_AGF: pr_debug("AGF frame\n"); nfc_llcp_recv_agf(local, skb); break; } } static void nfc_llcp_rx_work(struct work_struct *work) { struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local, rx_work); struct sk_buff *skb; skb = local->rx_pending; if (skb == NULL) { pr_debug("No pending SKB\n"); return; } __net_timestamp(skb); nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_RX); nfc_llcp_rx_skb(local, skb); schedule_work(&local->tx_work); kfree_skb(local->rx_pending); local->rx_pending = NULL; } static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb) { local->rx_pending = skb; del_timer(&local->link_timer); schedule_work(&local->rx_work); } void nfc_llcp_recv(void *data, struct sk_buff *skb, int err) { struct nfc_llcp_local *local = (struct nfc_llcp_local *) data; if (err < 0) { pr_err("LLCP PDU receive err %d\n", err); return; } __nfc_llcp_recv(local, skb); } int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) { struct nfc_llcp_local *local; local = nfc_llcp_find_local(dev); if (local == NULL) { kfree_skb(skb); return -ENODEV; } __nfc_llcp_recv(local, skb); nfc_llcp_local_put(local); return 0; } void nfc_llcp_mac_is_down(struct nfc_dev *dev) { struct nfc_llcp_local *local; local = nfc_llcp_find_local(dev); if (local == NULL) return; local->remote_miu = LLCP_DEFAULT_MIU; local->remote_lto = LLCP_DEFAULT_LTO; /* Close and purge all existing sockets */ nfc_llcp_socket_release(local, true, 0); nfc_llcp_local_put(local); } void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode) { struct nfc_llcp_local *local; pr_debug("rf mode %d\n", rf_mode); local = nfc_llcp_find_local(dev); if (local == NULL) return; local->target_idx = target_idx; local->comm_mode = comm_mode; local->rf_mode = rf_mode; if (rf_mode == NFC_RF_INITIATOR) { pr_debug("Queueing Tx work\n"); schedule_work(&local->tx_work); } else { mod_timer(&local->link_timer, jiffies + msecs_to_jiffies(local->remote_lto)); } nfc_llcp_local_put(local); } int nfc_llcp_register_device(struct nfc_dev *ndev) { struct nfc_llcp_local *local; local = kzalloc(sizeof(struct nfc_llcp_local), GFP_KERNEL); if (local == NULL) return -ENOMEM; local->dev = ndev; INIT_LIST_HEAD(&local->list); kref_init(&local->ref); mutex_init(&local->sdp_lock); timer_setup(&local->link_timer, nfc_llcp_symm_timer, 0); skb_queue_head_init(&local->tx_queue); 
INIT_WORK(&local->tx_work, nfc_llcp_tx_work); local->rx_pending = NULL; INIT_WORK(&local->rx_work, nfc_llcp_rx_work); INIT_WORK(&local->timeout_work, nfc_llcp_timeout_work); rwlock_init(&local->sockets.lock); rwlock_init(&local->connecting_sockets.lock); rwlock_init(&local->raw_sockets.lock); local->lto = 150; /* 1500 ms */ local->rw = LLCP_MAX_RW; local->miux = cpu_to_be16(LLCP_MAX_MIUX); local->local_wks = 0x1; /* LLC Link Management */ nfc_llcp_build_gb(local); local->remote_miu = LLCP_DEFAULT_MIU; local->remote_lto = LLCP_DEFAULT_LTO; mutex_init(&local->sdreq_lock); INIT_HLIST_HEAD(&local->pending_sdreqs); timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0); INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work); list_add(&local->list, &llcp_devices); return 0; } void nfc_llcp_unregister_device(struct nfc_dev *dev) { struct nfc_llcp_local *local = nfc_llcp_remove_local(dev); if (local == NULL) { pr_debug("No such device\n"); return; } local_cleanup(local); nfc_llcp_local_put(local); } int __init nfc_llcp_init(void) { return nfc_llcp_sock_init(); } void nfc_llcp_exit(void) { nfc_llcp_sock_exit(); }
linux-master
net/nfc/llcp_core.c
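Editorial aside, not part of the kernel file above: nfc_llcp_dsap(), nfc_llcp_ptype() and nfc_llcp_ssap() above decode the 2-byte LLCP PDU header (6-bit DSAP, 4-bit PTYPE, 6-bit SSAP). The sketch below packs a header with the inverse of those expressions and decodes it back; the packing helper and the example field values are illustrative, not taken from the file.

#include <assert.h>

/* Pack DSAP/PTYPE/SSAP into the 2-byte LLCP header; this is simply the
 * inverse of the unpack expressions used by nfc_llcp_dsap() and friends. */
static void llcp_pack_header(unsigned char hdr[2],
			     unsigned int dsap, unsigned int ptype,
			     unsigned int ssap)
{
	hdr[0] = (unsigned char)((dsap << 2) | (ptype >> 2));
	hdr[1] = (unsigned char)((ptype << 6) | ssap);
}

int main(void)
{
	unsigned char hdr[2];
	unsigned int dsap, ptype, ssap;

	llcp_pack_header(hdr, 0x04, 0x0c, 0x20);	/* example values only */

	dsap  = (hdr[0] & 0xfc) >> 2;			/* as nfc_llcp_dsap()  */
	ptype = ((hdr[0] & 0x03) << 2) |
		((hdr[1] & 0xc0) >> 6);			/* as nfc_llcp_ptype() */
	ssap  = hdr[1] & 0x3f;				/* as nfc_llcp_ssap()  */

	assert(dsap == 0x04 && ptype == 0x0c && ssap == 0x20);
	return 0;
}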
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/sched/signal.h> #include "nfc.h" #include "llcp.h" static int sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; pr_debug("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } static struct proto llcp_sock_proto = { .name = "NFC_LLCP", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_llcp_sock), }; static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); /* This is going to be a listening socket, dsap must be 0 */ if (llcp_addr.dsap != 0) return -EINVAL; lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(llcp_addr.service_name, llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { ret = -ENOMEM; goto sock_llcp_put_local; } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -EADDRINUSE; goto free_service_name; } llcp_sock->reserved_ssap = llcp_sock->ssap; nfc_llcp_sock_link(&local->sockets, sk); pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); sk->sk_state = LLCP_BOUND; nfc_put_device(dev); release_sock(sk); return 0; free_service_name: kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; sock_llcp_put_local: nfc_llcp_local_put(llcp_sock->local); llcp_sock->local = NULL; llcp_sock->dev = NULL; put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); 
lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; nfc_llcp_sock_link(&local->raw_sockets, sk); sk->sk_state = LLCP_BOUND; put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int ret = 0; pr_debug("sk %p backlog %d\n", sk, backlog); lock_sock(sk); if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) || sk->sk_state != LLCP_BOUND) { ret = -EBADFD; goto error; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; pr_debug("Socket listening\n"); sk->sk_state = LLCP_LISTEN; error: release_sock(sk); return ret; } static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); u32 opt; int err = 0; pr_debug("%p optname %d\n", sk, optname); if (level != SOL_NFC) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case NFC_LLCP_RW: if (sk->sk_state == LLCP_CONNECTED || sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt > LLCP_MAX_RW) { err = -EINVAL; break; } llcp_sock->rw = (u8) opt; break; case NFC_LLCP_MIUX: if (sk->sk_state == LLCP_CONNECTED || sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt > LLCP_MAX_MIUX) { err = -EINVAL; break; } llcp_sock->miux = cpu_to_be16((u16) opt); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); pr_debug("%p rw %d miux %d\n", llcp_sock, llcp_sock->rw, llcp_sock->miux); return err; } static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct nfc_llcp_local *local; struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int len, err = 0; u16 miux, remote_miu; u8 rw; pr_debug("%p optname %d\n", sk, optname); if (level != SOL_NFC) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; local = llcp_sock->local; if (!local) return -ENODEV; len = min_t(u32, len, sizeof(u32)); lock_sock(sk); switch (optname) { case NFC_LLCP_RW: rw = llcp_sock->rw > LLCP_MAX_RW ? local->rw : llcp_sock->rw; if (put_user(rw, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_MIUX: miux = be16_to_cpu(llcp_sock->miux) > LLCP_MAX_MIUX ? be16_to_cpu(local->miux) : be16_to_cpu(llcp_sock->miux); if (put_user(miux, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_MIU: remote_miu = llcp_sock->remote_miu > LLCP_MAX_MIU ? 
local->remote_miu : llcp_sock->remote_miu; if (put_user(remote_miu, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_LTO: if (put_user(local->remote_lto / 10, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_RW: if (put_user(llcp_sock->remote_rw, (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); if (put_user(len, optlen)) return -EFAULT; return err; } void nfc_llcp_accept_unlink(struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); pr_debug("state %d\n", sk->sk_state); list_del_init(&llcp_sock->accept_queue); sk_acceptq_removed(llcp_sock->parent); llcp_sock->parent = NULL; sock_put(sk); } void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent); /* Lock will be free from unlink */ sock_hold(sk); list_add_tail(&llcp_sock->accept_queue, &llcp_sock_parent->accept_queue); llcp_sock->parent = parent; sk_acceptq_added(parent); } struct sock *nfc_llcp_accept_dequeue(struct sock *parent, struct socket *newsock) { struct nfc_llcp_sock *lsk, *n, *llcp_parent; struct sock *sk; llcp_parent = nfc_llcp_sock(parent); list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, accept_queue) { sk = &lsk->sk; lock_sock(sk); if (sk->sk_state == LLCP_CLOSED) { release_sock(sk); nfc_llcp_accept_unlink(sk); continue; } if (sk->sk_state == LLCP_CONNECTED || !newsock) { list_del_init(&lsk->accept_queue); sock_put(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); pr_debug("Returning sk state %d\n", sk->sk_state); sk_acceptq_removed(parent); return sk; } release_sock(sk); } return NULL; } static int llcp_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *new_sk; long timeo; int ret = 0; pr_debug("parent %p\n", sk); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != LLCP_LISTEN) { ret = -EBADFD; goto error; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); /* Wait for an incoming connection. 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (ret) goto error; newsock->state = SS_CONNECTED; pr_debug("new socket %p\n", new_sk); error: release_sock(sk); return ret; } static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr); if (llcp_sock == NULL || llcp_sock->dev == NULL) return -EBADFD; pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx, llcp_sock->dsap, llcp_sock->ssap); memset(llcp_addr, 0, sizeof(*llcp_addr)); lock_sock(sk); if (!llcp_sock->dev) { release_sock(sk); return -EBADFD; } llcp_addr->sa_family = AF_NFC; llcp_addr->dev_idx = llcp_sock->dev->idx; llcp_addr->target_idx = llcp_sock->target_idx; llcp_addr->nfc_protocol = llcp_sock->nfc_protocol; llcp_addr->dsap = llcp_sock->dsap; llcp_addr->ssap = llcp_sock->ssap; llcp_addr->service_name_len = llcp_sock->service_name_len; memcpy(llcp_addr->service_name, llcp_sock->service_name, llcp_addr->service_name_len); release_sock(sk); return sizeof(struct sockaddr_nfc_llcp); } static inline __poll_t llcp_accept_poll(struct sock *parent) { struct nfc_llcp_sock *llcp_sock, *parent_sock; struct sock *sk; parent_sock = nfc_llcp_sock(parent); list_for_each_entry(llcp_sock, &parent_sock->accept_queue, accept_queue) { sk = &llcp_sock->sk; if (sk->sk_state == LLCP_CONNECTED) return EPOLLIN | EPOLLRDNORM; } return 0; } static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; pr_debug("%p\n", sk); sock_poll_wait(file, sock, wait); if (sk->sk_state == LLCP_LISTEN) return llcp_accept_poll(sk); if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == LLCP_CLOSED) mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); pr_debug("mask 0x%x\n", mask); return mask; } static int llcp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; struct nfc_llcp_local *local; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int err = 0; if (!sk) return 0; pr_debug("%p\n", sk); local = llcp_sock->local; if (local == NULL) { err = -ENODEV; goto out; } lock_sock(sk); /* Send a DISC */ if (sk->sk_state == LLCP_CONNECTED) nfc_llcp_send_disconnect(llcp_sock); if (sk->sk_state == LLCP_LISTEN) { struct nfc_llcp_sock *lsk, *n; struct sock *accept_sk; list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, accept_queue) { accept_sk = &lsk->sk; lock_sock(accept_sk); nfc_llcp_send_disconnect(lsk); nfc_llcp_accept_unlink(accept_sk); release_sock(accept_sk); } } if (sock->type == SOCK_RAW) nfc_llcp_sock_unlink(&local->raw_sockets, sk); else nfc_llcp_sock_unlink(&local->sockets, sk); if (llcp_sock->reserved_ssap < LLCP_SAP_MAX) nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); release_sock(sk); out: sock_orphan(sk); sock_put(sk); return err; } static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr; struct nfc_dev *dev; struct nfc_llcp_local *local; int ret = 0; pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC) return -EINVAL; if (addr->service_name_len == 0 && addr->dsap == 0) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sk->sk_state == LLCP_CONNECTED) { ret = -EISCONN; goto error; } if (sk->sk_state == LLCP_CONNECTING) { ret = -EINPROGRESS; goto error; } dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } device_lock(&dev->dev); if (dev->dep_link_up == false) { ret = -ENOLINK; device_unlock(&dev->dev); goto sock_llcp_put_local; } device_unlock(&dev->dev); if (local->rf_mode == NFC_RF_INITIATOR && addr->target_idx != local->target_idx) { ret = -ENOLINK; goto sock_llcp_put_local; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -ENOMEM; goto sock_llcp_nullify; } llcp_sock->reserved_ssap = llcp_sock->ssap; if (addr->service_name_len == 0) llcp_sock->dsap = addr->dsap; else llcp_sock->dsap = LLCP_SAP_SDP; llcp_sock->nfc_protocol = addr->nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(addr->service_name, llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { ret = -ENOMEM; goto sock_llcp_release; } nfc_llcp_sock_link(&local->connecting_sockets, sk); ret = nfc_llcp_send_connect(llcp_sock); if (ret) goto sock_unlink; sk->sk_state = LLCP_CONNECTING; ret = sock_wait_state(sk, LLCP_CONNECTED, sock_sndtimeo(sk, 
flags & O_NONBLOCK)); if (ret && ret != -EINPROGRESS) goto sock_unlink; release_sock(sk); return ret; sock_unlink: nfc_llcp_sock_unlink(&local->connecting_sockets, sk); kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); sock_llcp_nullify: llcp_sock->local = NULL; llcp_sock->dev = NULL; sock_llcp_put_local: nfc_llcp_local_put(local); put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int ret; pr_debug("sock %p sk %p", sock, sk); ret = sock_error(sk); if (ret) return ret; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (!llcp_sock->local) { release_sock(sk); return -ENODEV; } if (sk->sk_type == SOCK_DGRAM) { DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr, msg->msg_name); if (msg->msg_namelen < sizeof(*addr)) { release_sock(sk); return -EINVAL; } release_sock(sk); return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap, msg, len); } if (sk->sk_state != LLCP_CONNECTED) { release_sock(sk); return -ENOTCONN; } release_sock(sk); return nfc_llcp_send_i_frame(llcp_sock, msg, len); } static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; unsigned int copied, rlen; struct sk_buff *skb, *cskb; int err = 0; pr_debug("%p %zu\n", sk, len); lock_sock(sk); if (sk->sk_state == LLCP_CLOSED && skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); return 0; } release_sock(sk); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, &err); if (!skb) { pr_err("Recv datagram failed state %d %d %d", sk->sk_state, err, sock_error(sk)); if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } rlen = skb->len; /* real length of skb */ copied = min_t(unsigned int, rlen, len); cskb = skb; if (skb_copy_datagram_msg(cskb, 0, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; } sock_recv_timestamp(msg, sk, skb); if (sk->sk_type == SOCK_DGRAM && msg->msg_name) { struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb); DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, sockaddr, msg->msg_name); msg->msg_namelen = sizeof(struct sockaddr_nfc_llcp); pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); memset(sockaddr, 0, sizeof(*sockaddr)); sockaddr->sa_family = AF_NFC; sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; sockaddr->dsap = ui_cb->dsap; sockaddr->ssap = ui_cb->ssap; } /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { /* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { skb_pull(skb, copied); if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); goto done; } } kfree_skb(skb); } /* XXX Queue backlogged skbs */ done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; return copied; } static const struct proto_ops llcp_sock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .bind = llcp_sock_bind, .connect = llcp_sock_connect, .release = llcp_sock_release, .socketpair = sock_no_socketpair, .accept = llcp_sock_accept, .getname = llcp_sock_getname, .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = llcp_sock_listen, .shutdown = sock_no_shutdown, .setsockopt = nfc_llcp_setsockopt, .getsockopt 
= nfc_llcp_getsockopt, .sendmsg = llcp_sock_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops llcp_rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .bind = llcp_raw_sock_bind, .connect = sock_no_connect, .release = llcp_sock_release, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = llcp_sock_getname, .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, }; static void llcp_sock_destruct(struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); pr_debug("%p\n", sk); if (sk->sk_state == LLCP_CONNECTED) nfc_put_device(llcp_sock->dev); skb_queue_purge(&sk->sk_receive_queue); nfc_llcp_sock_free(llcp_sock); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC LLCP socket %p\n", sk); return; } } struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern) { struct sock *sk; struct nfc_llcp_sock *llcp_sock; sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern); if (!sk) return NULL; llcp_sock = nfc_llcp_sock(sk); sock_init_data(sock, sk); sk->sk_state = LLCP_CLOSED; sk->sk_protocol = NFC_SOCKPROTO_LLCP; sk->sk_type = type; sk->sk_destruct = llcp_sock_destruct; llcp_sock->ssap = 0; llcp_sock->dsap = LLCP_SAP_SDP; llcp_sock->rw = LLCP_MAX_RW + 1; llcp_sock->miux = cpu_to_be16(LLCP_MAX_MIUX + 1); llcp_sock->send_n = llcp_sock->send_ack_n = 0; llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; llcp_sock->remote_ready = 1; llcp_sock->reserved_ssap = LLCP_SAP_MAX; nfc_llcp_socket_remote_param_init(llcp_sock); skb_queue_head_init(&llcp_sock->tx_queue); skb_queue_head_init(&llcp_sock->tx_pending_queue); INIT_LIST_HEAD(&llcp_sock->accept_queue); if (sock != NULL) sock->state = SS_UNCONNECTED; return sk; } void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) { kfree(sock->service_name); skb_queue_purge(&sock->tx_queue); skb_queue_purge(&sock->tx_pending_queue); list_del_init(&sock->accept_queue); sock->parent = NULL; nfc_llcp_local_put(sock->local); } static int llcp_sock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("%p\n", sock); if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!capable(CAP_NET_RAW)) return -EPERM; sock->ops = &llcp_rawsock_ops; } else { sock->ops = &llcp_sock_ops; } sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern); if (sk == NULL) return -ENOMEM; return 0; } static const struct nfc_protocol llcp_nfc_proto = { .id = NFC_SOCKPROTO_LLCP, .proto = &llcp_sock_proto, .owner = THIS_MODULE, .create = llcp_sock_create }; int __init nfc_llcp_sock_init(void) { return nfc_proto_register(&llcp_nfc_proto); } void nfc_llcp_sock_exit(void) { nfc_proto_unregister(&llcp_nfc_proto); }
linux-master
net/nfc/llcp_sock.c
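The proto_ops above are what user space reaches through the AF_NFC family. Below is a minimal, illustrative user-space sketch (not part of the kernel sources) of connecting an LLCP stream socket; it assumes the uapi definitions from <linux/nfc.h> (AF_NFC, NFC_SOCKPROTO_LLCP, struct sockaddr_nfc_llcp), and the adapter index, target index and service name are hypothetical placeholders.

/* Illustrative user-space sketch: connect an NFC LLCP client socket.
 * Assumes <linux/nfc.h> provides AF_NFC, NFC_SOCKPROTO_LLCP and
 * struct sockaddr_nfc_llcp; the device/target indexes and service
 * name below are hypothetical placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/nfc.h>

int main(void)
{
	struct sockaddr_nfc_llcp addr;
	const char *sn = "urn:nfc:sn:snep";	/* example service name */
	int fd;

	fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sa_family = AF_NFC;
	addr.nfc_protocol = NFC_PROTO_NFC_DEP;
	addr.dev_idx = 0;	/* hypothetical adapter index */
	addr.target_idx = 1;	/* hypothetical peer target index */
	addr.service_name_len = strlen(sn);
	memcpy(addr.service_name, sn, addr.service_name_len);

	/* llcp_sock_connect() in the file above resolves the service
	 * name to a remote SAP and sends an LLCP CONNECT PDU. */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}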
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include "nfc.h" #include "llcp.h" static const u8 llcp_tlv_length[LLCP_TLV_MAX] = { 0, 1, /* VERSION */ 2, /* MIUX */ 2, /* WKS */ 1, /* LTO */ 1, /* RW */ 0, /* SN */ 1, /* OPT */ 0, /* SDREQ */ 2, /* SDRES */ }; static u8 llcp_tlv8(const u8 *tlv, u8 type) { if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) return 0; return tlv[2]; } static u16 llcp_tlv16(const u8 *tlv, u8 type) { if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) return 0; return be16_to_cpu(*((__be16 *)(tlv + 2))); } static u8 llcp_tlv_version(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_VERSION); } static u16 llcp_tlv_miux(const u8 *tlv) { return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff; } static u16 llcp_tlv_wks(const u8 *tlv) { return llcp_tlv16(tlv, LLCP_TLV_WKS); } static u16 llcp_tlv_lto(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_LTO); } static u8 llcp_tlv_opt(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_OPT); } static u8 llcp_tlv_rw(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf; } u8 *nfc_llcp_build_tlv(u8 type, const u8 *value, u8 value_length, u8 *tlv_length) { u8 *tlv, length; pr_debug("type %d\n", type); if (type >= LLCP_TLV_MAX) return NULL; length = llcp_tlv_length[type]; if (length == 0 && value_length == 0) return NULL; else if (length == 0) length = value_length; *tlv_length = 2 + length; tlv = kzalloc(2 + length, GFP_KERNEL); if (tlv == NULL) return tlv; tlv[0] = type; tlv[1] = length; memcpy(tlv + 2, value, length); return tlv; } struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap) { struct nfc_llcp_sdp_tlv *sdres; u8 value[2]; sdres = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); if (sdres == NULL) return NULL; value[0] = tid; value[1] = sap; sdres->tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, value, 2, &sdres->tlv_len); if (sdres->tlv == NULL) { kfree(sdres); return NULL; } sdres->tid = tid; sdres->sap = sap; INIT_HLIST_NODE(&sdres->node); return sdres; } struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, const char *uri, size_t uri_len) { struct nfc_llcp_sdp_tlv *sdreq; pr_debug("uri: %s, len: %zu\n", uri, uri_len); /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) return NULL; sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); if (sdreq == NULL) return NULL; sdreq->tlv_len = uri_len + 3; if (uri[uri_len - 1] == 0) sdreq->tlv_len--; sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL); if (sdreq->tlv == NULL) { kfree(sdreq); return NULL; } sdreq->tlv[0] = LLCP_TLV_SDREQ; sdreq->tlv[1] = sdreq->tlv_len - 2; sdreq->tlv[2] = tid; sdreq->tid = tid; sdreq->uri = sdreq->tlv + 3; memcpy(sdreq->uri, uri, uri_len); sdreq->time = jiffies; INIT_HLIST_NODE(&sdreq->node); return sdreq; } void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp) { kfree(sdp->tlv); kfree(sdp); } void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head) { struct nfc_llcp_sdp_tlv *sdp; struct hlist_node *n; hlist_for_each_entry_safe(sdp, n, head, node) { hlist_del(&sdp->node); nfc_llcp_free_sdp_tlv(sdp); } } int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local, const u8 *tlv_array, u16 tlv_array_len) { const u8 *tlv = tlv_array; u8 type, length, offset = 0; pr_debug("TLV array length %d\n", tlv_array_len); if (local == NULL) 
return -ENODEV; while (offset < tlv_array_len) { type = tlv[0]; length = tlv[1]; pr_debug("type 0x%x length %d\n", type, length); switch (type) { case LLCP_TLV_VERSION: local->remote_version = llcp_tlv_version(tlv); break; case LLCP_TLV_MIUX: local->remote_miu = llcp_tlv_miux(tlv) + 128; break; case LLCP_TLV_WKS: local->remote_wks = llcp_tlv_wks(tlv); break; case LLCP_TLV_LTO: local->remote_lto = llcp_tlv_lto(tlv) * 10; break; case LLCP_TLV_OPT: local->remote_opt = llcp_tlv_opt(tlv); break; default: pr_err("Invalid gt tlv value 0x%x\n", type); break; } offset += length + 2; tlv += length + 2; } pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n", local->remote_version, local->remote_miu, local->remote_lto, local->remote_opt, local->remote_wks); return 0; } int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock, const u8 *tlv_array, u16 tlv_array_len) { const u8 *tlv = tlv_array; u8 type, length, offset = 0; pr_debug("TLV array length %d\n", tlv_array_len); if (sock == NULL) return -ENOTCONN; while (offset < tlv_array_len) { type = tlv[0]; length = tlv[1]; pr_debug("type 0x%x length %d\n", type, length); switch (type) { case LLCP_TLV_MIUX: sock->remote_miu = llcp_tlv_miux(tlv) + 128; break; case LLCP_TLV_RW: sock->remote_rw = llcp_tlv_rw(tlv); break; case LLCP_TLV_SN: break; default: pr_err("Invalid gt tlv value 0x%x\n", type); break; } offset += length + 2; tlv += length + 2; } pr_debug("sock %p rw %d miu %d\n", sock, sock->remote_rw, sock->remote_miu); return 0; } static struct sk_buff *llcp_add_header(struct sk_buff *pdu, u8 dsap, u8 ssap, u8 ptype) { u8 header[2]; pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); header[0] = (u8)((dsap << 2) | (ptype >> 2)); header[1] = (u8)((ptype << 6) | ssap); pr_debug("header 0x%x 0x%x\n", header[0], header[1]); skb_put_data(pdu, header, LLCP_HEADER_SIZE); return pdu; } static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, const u8 *tlv, u8 tlv_length) { /* XXX Add an skb length check */ if (tlv == NULL) return NULL; skb_put_data(pdu, tlv, tlv_length); return pdu; } static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, u8 cmd, u16 size) { struct sk_buff *skb; int err; if (sock->ssap == 0) return NULL; skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, size + LLCP_HEADER_SIZE, &err); if (skb == NULL) { pr_err("Could not allocate PDU\n"); return NULL; } skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd); return skb; } int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) { struct sk_buff *skb; struct nfc_dev *dev; struct nfc_llcp_local *local; local = sock->local; if (local == NULL) return -ENODEV; dev = sock->dev; if (dev == NULL) return -ENODEV; skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); if (skb == NULL) return -ENOMEM; skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_symm(struct nfc_dev *dev) { struct sk_buff *skb; struct nfc_llcp_local *local; u16 size = 0; int err; local = nfc_llcp_find_local(dev); if (local == NULL) return -ENODEV; size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) { err = -ENOMEM; goto out; } skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); __net_timestamp(skb); nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); err = nfc_data_exchange(dev, local->target_idx, skb, nfc_llcp_recv, local); out: nfc_llcp_local_put(local); return err; } int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) { struct 
nfc_llcp_local *local; struct sk_buff *skb; const u8 *service_name_tlv = NULL; const u8 *miux_tlv = NULL; const u8 *rw_tlv = NULL; u8 service_name_tlv_length = 0; u8 miux_tlv_length, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; local = sock->local; if (local == NULL) return -ENODEV; if (sock->service_name != NULL) { service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN, sock->service_name, sock->service_name_len, &service_name_tlv_length); if (!service_name_tlv) { err = -ENOMEM; goto error_tlv; } size += service_name_tlv_length; } /* If the socket parameters are not set, use the local ones */ miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ? local->miux : sock->miux; rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); if (!miux_tlv) { err = -ENOMEM; goto error_tlv; } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); if (!rw_tlv) { err = -ENOMEM; goto error_tlv; } size += rw_tlv_length; pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); if (skb == NULL) { err = -ENOMEM; goto error_tlv; } llcp_add_tlv(skb, service_name_tlv, service_name_tlv_length); llcp_add_tlv(skb, miux_tlv, miux_tlv_length); llcp_add_tlv(skb, rw_tlv, rw_tlv_length); skb_queue_tail(&local->tx_queue, skb); err = 0; error_tlv: if (err) pr_err("error %d\n", err); kfree(service_name_tlv); kfree(miux_tlv); kfree(rw_tlv); return err; } int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) { struct nfc_llcp_local *local; struct sk_buff *skb; const u8 *miux_tlv = NULL; const u8 *rw_tlv = NULL; u8 miux_tlv_length, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; local = sock->local; if (local == NULL) return -ENODEV; /* If the socket parameters are not set, use the local ones */ miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ? local->miux : sock->miux; rw = sock->rw > LLCP_MAX_RW ? 
local->rw : sock->rw; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); if (!miux_tlv) { err = -ENOMEM; goto error_tlv; } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); if (!rw_tlv) { err = -ENOMEM; goto error_tlv; } size += rw_tlv_length; skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); if (skb == NULL) { err = -ENOMEM; goto error_tlv; } llcp_add_tlv(skb, miux_tlv, miux_tlv_length); llcp_add_tlv(skb, rw_tlv, rw_tlv_length); skb_queue_tail(&local->tx_queue, skb); err = 0; error_tlv: if (err) pr_err("error %d\n", err); kfree(miux_tlv); kfree(rw_tlv); return err; } static struct sk_buff *nfc_llcp_allocate_snl(struct nfc_llcp_local *local, size_t tlv_length) { struct sk_buff *skb; struct nfc_dev *dev; u16 size = 0; if (local == NULL) return ERR_PTR(-ENODEV); dev = local->dev; if (dev == NULL) return ERR_PTR(-ENODEV); size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; size += tlv_length; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) return ERR_PTR(-ENOMEM); skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL); return skb; } int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local, struct hlist_head *tlv_list, size_t tlvs_len) { struct nfc_llcp_sdp_tlv *sdp; struct hlist_node *n; struct sk_buff *skb; skb = nfc_llcp_allocate_snl(local, tlvs_len); if (IS_ERR(skb)) return PTR_ERR(skb); hlist_for_each_entry_safe(sdp, n, tlv_list, node) { skb_put_data(skb, sdp->tlv, sdp->tlv_len); hlist_del(&sdp->node); nfc_llcp_free_sdp_tlv(sdp); } skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local, struct hlist_head *tlv_list, size_t tlvs_len) { struct nfc_llcp_sdp_tlv *sdreq; struct hlist_node *n; struct sk_buff *skb; skb = nfc_llcp_allocate_snl(local, tlvs_len); if (IS_ERR(skb)) return PTR_ERR(skb); mutex_lock(&local->sdreq_lock); if (hlist_empty(&local->pending_sdreqs)) mod_timer(&local->sdreq_timer, jiffies + msecs_to_jiffies(3 * local->remote_lto)); hlist_for_each_entry_safe(sdreq, n, tlv_list, node) { pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri); skb_put_data(skb, sdreq->tlv, sdreq->tlv_len); hlist_del(&sdreq->node); hlist_add_head(&sdreq->node, &local->pending_sdreqs); } mutex_unlock(&local->sdreq_lock); skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) { struct sk_buff *skb; struct nfc_dev *dev; u16 size = 1; /* Reason code */ pr_debug("Sending DM reason 0x%x\n", reason); if (local == NULL) return -ENODEV; dev = local->dev; if (dev == NULL) return -ENODEV; size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) return -ENOMEM; skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM); skb_put_data(skb, &reason, 1); skb_queue_head(&local->tx_queue, skb); return 0; } int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, struct msghdr *msg, size_t len) { struct sk_buff *pdu; struct sock *sk = &sock->sk; struct nfc_llcp_local *local; size_t frag_len = 0, remaining_len; u8 *msg_data, *msg_ptr; u16 remote_miu; pr_debug("Send I frame len %zd\n", len); local = sock->local; if (local == NULL) return -ENODEV; /* Remote is ready but has not acknowledged our frames */ if((sock->remote_ready && skb_queue_len(&sock->tx_pending_queue) >= sock->remote_rw && 
skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) { pr_err("Pending queue is full %d frames\n", skb_queue_len(&sock->tx_pending_queue)); return -ENOBUFS; } /* Remote is not ready and we've been queueing enough frames */ if ((!sock->remote_ready && skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) { pr_err("Tx queue is full %d frames\n", skb_queue_len(&sock->tx_queue)); return -ENOBUFS; } msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN); if (msg_data == NULL) return -ENOMEM; if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } remaining_len = len; msg_ptr = msg_data; do { remote_miu = sock->remote_miu > LLCP_MAX_MIU ? LLCP_DEFAULT_MIU : sock->remote_miu; frag_len = min_t(size_t, remote_miu, remaining_len); pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); pdu = llcp_allocate_pdu(sock, LLCP_PDU_I, frag_len + LLCP_SEQUENCE_SIZE); if (pdu == NULL) { kfree(msg_data); return -ENOMEM; } skb_put(pdu, LLCP_SEQUENCE_SIZE); if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); skb_queue_tail(&sock->tx_queue, pdu); lock_sock(sk); nfc_llcp_queue_i_frames(sock); release_sock(sk); remaining_len -= frag_len; msg_ptr += frag_len; } while (remaining_len > 0); kfree(msg_data); return len; } int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, struct msghdr *msg, size_t len) { struct sk_buff *pdu; struct nfc_llcp_local *local; size_t frag_len = 0, remaining_len; u8 *msg_ptr, *msg_data; u16 remote_miu; int err; pr_debug("Send UI frame len %zd\n", len); local = sock->local; if (local == NULL) return -ENODEV; msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN); if (msg_data == NULL) return -ENOMEM; if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } remaining_len = len; msg_ptr = msg_data; do { remote_miu = sock->remote_miu > LLCP_MAX_MIU ? local->remote_miu : sock->remote_miu; frag_len = min_t(size_t, remote_miu, remaining_len); pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, frag_len + LLCP_HEADER_SIZE, &err); if (pdu == NULL) { pr_err("Could not allocate PDU (error=%d)\n", err); len -= remaining_len; if (len == 0) len = err; break; } pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); /* No need to check for the peer RW for UI frames */ skb_queue_tail(&local->tx_queue, pdu); remaining_len -= frag_len; msg_ptr += frag_len; } while (remaining_len > 0); kfree(msg_data); return len; } int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) { struct sk_buff *skb; struct nfc_llcp_local *local; pr_debug("Send rr nr %d\n", sock->recv_n); local = sock->local; if (local == NULL) return -ENODEV; skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE); if (skb == NULL) return -ENOMEM; skb_put(skb, LLCP_SEQUENCE_SIZE); skb->data[2] = sock->recv_n; skb_queue_head(&local->tx_queue, skb); return 0; }
linux-master
net/nfc/llcp_commands.c
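As a stand-alone illustration of the two-byte header packing performed by llcp_add_header() above (DSAP in the top six bits of byte 0, the four PTYPE bits split across the byte boundary, SSAP in the low six bits of byte 1), here is a small user-space sketch that packs and unpacks the header; the sample SAP and PTYPE values are arbitrary.

/* Stand-alone illustration of the LLCP 2-byte header layout used by
 * llcp_add_header(): DSAP(6) | PTYPE(4) | SSAP(6). Values are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

static void llcp_pack_header(uint8_t hdr[2], uint8_t dsap, uint8_t ssap, uint8_t ptype)
{
	hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));
	hdr[1] = (uint8_t)((ptype << 6) | ssap);
}

static void llcp_unpack_header(const uint8_t hdr[2], uint8_t *dsap, uint8_t *ssap, uint8_t *ptype)
{
	*dsap = hdr[0] >> 2;
	*ptype = (uint8_t)(((hdr[0] & 0x03) << 2) | (hdr[1] >> 6));
	*ssap = hdr[1] & 0x3f;
}

int main(void)
{
	uint8_t hdr[2], dsap, ssap, ptype;

	llcp_pack_header(hdr, 0x04, 0x20, 0x04 /* CONNECT */);
	llcp_unpack_header(hdr, &dsap, &ssap, &ptype);
	printf("hdr=%02x %02x dsap=%u ssap=%u ptype=%u\n",
	       hdr[0], hdr[1], dsap, ssap, ptype);
	return 0;
}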
// SPDX-License-Identifier: GPL-2.0-only /* * shdlc Link Layer Control * * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__ #include <linux/types.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/skbuff.h> #include "llc.h" enum shdlc_state { SHDLC_DISCONNECTED = 0, SHDLC_CONNECTING = 1, SHDLC_NEGOTIATING = 2, SHDLC_HALF_CONNECTED = 3, SHDLC_CONNECTED = 4 }; struct llc_shdlc { struct nfc_hci_dev *hdev; xmit_to_drv_t xmit_to_drv; rcv_to_hci_t rcv_to_hci; struct mutex state_mutex; enum shdlc_state state; int hard_fault; wait_queue_head_t *connect_wq; int connect_tries; int connect_result; struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */ u8 w; /* window size */ bool srej_support; struct timer_list t1_timer; /* send ack timeout */ bool t1_active; struct timer_list t2_timer; /* guard/retransmit timeout */ bool t2_active; int ns; /* next seq num for send */ int nr; /* next expected seq num for receive */ int dnr; /* oldest sent unacked seq num */ struct sk_buff_head rcv_q; struct sk_buff_head send_q; bool rnr; /* other side is not ready to receive */ struct sk_buff_head ack_pending_q; struct work_struct sm_work; int tx_headroom; int tx_tailroom; llc_failure_t llc_failure; }; #define SHDLC_LLC_HEAD_ROOM 2 #define SHDLC_MAX_WINDOW 4 #define SHDLC_SREJ_SUPPORT false #define SHDLC_CONTROL_HEAD_MASK 0xe0 #define SHDLC_CONTROL_HEAD_I 0x80 #define SHDLC_CONTROL_HEAD_I2 0xa0 #define SHDLC_CONTROL_HEAD_S 0xc0 #define SHDLC_CONTROL_HEAD_U 0xe0 #define SHDLC_CONTROL_NS_MASK 0x38 #define SHDLC_CONTROL_NR_MASK 0x07 #define SHDLC_CONTROL_TYPE_MASK 0x18 #define SHDLC_CONTROL_M_MASK 0x1f enum sframe_type { S_FRAME_RR = 0x00, S_FRAME_REJ = 0x01, S_FRAME_RNR = 0x02, S_FRAME_SREJ = 0x03 }; enum uframe_modifier { U_FRAME_UA = 0x06, U_FRAME_RSET = 0x19 }; #define SHDLC_CONNECT_VALUE_MS 5 #define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4) #define SHDLC_T2_VALUE_MS 300 #define SHDLC_DUMP_SKB(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \ 16, 1, skb->data, skb->len, 0); \ } while (0) /* checks x < y <= z modulo 8 */ static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z) { if (x < z) return ((x < y) && (y <= z)) ? true : false; else return ((y > x) || (y <= z)) ? true : false; } /* checks x <= y < z modulo 8 */ static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z) { if (x <= z) return ((x <= y) && (y < z)) ? true : false; else /* x > z -> z+8 > x */ return ((y >= x) || (y < z)) ? true : false; } static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc, int payload_len) { struct sk_buff *skb; skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM + shdlc->tx_tailroom + payload_len, GFP_KERNEL); if (skb) skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM); return skb; } /* immediately sends an S frame. */ static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc, enum sframe_type sframe_type, int nr) { int r; struct sk_buff *skb; pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr); skb = llc_shdlc_alloc_skb(shdlc, 0); if (skb == NULL) return -ENOMEM; *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr; r = shdlc->xmit_to_drv(shdlc->hdev, skb); kfree_skb(skb); return r; } /* immediately sends an U frame. 
skb may contain optional payload */ static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc, struct sk_buff *skb, enum uframe_modifier uframe_modifier) { int r; pr_debug("uframe_modifier=%d\n", uframe_modifier); *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier; r = shdlc->xmit_to_drv(shdlc->hdev, skb); kfree_skb(skb); return r; } /* * Free ack_pending frames until y_nr - 1, and reset t2 according to * the remaining oldest ack_pending frame sent time */ static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr) { struct sk_buff *skb; int dnr = shdlc->dnr; /* MUST initially be < y_nr */ pr_debug("release ack pending up to frame %d excluded\n", y_nr); while (dnr != y_nr) { pr_debug("release ack pending frame %d\n", dnr); skb = skb_dequeue(&shdlc->ack_pending_q); kfree_skb(skb); dnr = (dnr + 1) % 8; } if (skb_queue_empty(&shdlc->ack_pending_q)) { if (shdlc->t2_active) { del_timer_sync(&shdlc->t2_timer); shdlc->t2_active = false; pr_debug("All sent frames acked. Stopped T2(retransmit)\n"); } } else { skb = skb_peek(&shdlc->ack_pending_q); mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); shdlc->t2_active = true; pr_debug("Start T2(retransmit) for remaining unacked sent frames\n"); } } /* * Receive validated frames from lower layer. skb contains HCI payload only. * Handle according to algorithm at spec:10.8.2 */ static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc, struct sk_buff *skb, int ns, int nr) { int x_ns = ns; int y_nr = nr; pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr); if (shdlc->state != SHDLC_CONNECTED) goto exit; if (x_ns != shdlc->nr) { llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr); goto exit; } if (!shdlc->t1_active) { shdlc->t1_active = true; mod_timer(&shdlc->t1_timer, jiffies + msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w))); pr_debug("(re)Start T1(send ack)\n"); } if (skb->len) { shdlc->rcv_to_hci(shdlc->hdev, skb); skb = NULL; } shdlc->nr = (shdlc->nr + 1) % 8; if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { llc_shdlc_reset_t2(shdlc, y_nr); shdlc->dnr = y_nr; } exit: kfree_skb(skb); } static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr) { pr_debug("remote acked up to frame %d excluded\n", y_nr); if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) { llc_shdlc_reset_t2(shdlc, y_nr); shdlc->dnr = y_nr; } } static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc) { struct sk_buff *skb; pr_debug("ns reset to %d\n", shdlc->dnr); while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) { skb_pull(skb, 1); /* remove control field */ skb_queue_head(&shdlc->send_q, skb); } shdlc->ns = shdlc->dnr; } static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr) { struct sk_buff *skb; pr_debug("remote asks retransmission from frame %d\n", y_nr); if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { if (shdlc->t2_active) { del_timer_sync(&shdlc->t2_timer); shdlc->t2_active = false; pr_debug("Stopped T2(retransmit)\n"); } if (shdlc->dnr != y_nr) { while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) { skb = skb_dequeue(&shdlc->ack_pending_q); kfree_skb(skb); } } llc_shdlc_requeue_ack_pending(shdlc); } } /* See spec RR:10.8.3 REJ:10.8.4 */ static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc, enum sframe_type s_frame_type, int nr) { struct sk_buff *skb; if (shdlc->state != SHDLC_CONNECTED) return; switch (s_frame_type) { case S_FRAME_RR: llc_shdlc_rcv_ack(shdlc, nr); if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */ shdlc->rnr = false; 
if (shdlc->send_q.qlen == 0) { skb = llc_shdlc_alloc_skb(shdlc, 0); if (skb) skb_queue_tail(&shdlc->send_q, skb); } } break; case S_FRAME_REJ: llc_shdlc_rcv_rej(shdlc, nr); break; case S_FRAME_RNR: llc_shdlc_rcv_ack(shdlc, nr); shdlc->rnr = true; break; default: break; } } static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r) { pr_debug("result=%d\n", r); del_timer_sync(&shdlc->connect_timer); if (r == 0) { shdlc->ns = 0; shdlc->nr = 0; shdlc->dnr = 0; shdlc->state = SHDLC_HALF_CONNECTED; } else { shdlc->state = SHDLC_DISCONNECTED; } shdlc->connect_result = r; wake_up(shdlc->connect_wq); } static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc) { struct sk_buff *skb; skb = llc_shdlc_alloc_skb(shdlc, 2); if (skb == NULL) return -ENOMEM; skb_put_u8(skb, SHDLC_MAX_WINDOW); skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0); return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET); } static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc) { struct sk_buff *skb; skb = llc_shdlc_alloc_skb(shdlc, 0); if (skb == NULL) return -ENOMEM; return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA); } static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc, struct sk_buff *skb, enum uframe_modifier u_frame_modifier) { u8 w = SHDLC_MAX_WINDOW; bool srej_support = SHDLC_SREJ_SUPPORT; int r; pr_debug("u_frame_modifier=%d\n", u_frame_modifier); switch (u_frame_modifier) { case U_FRAME_RSET: switch (shdlc->state) { case SHDLC_NEGOTIATING: case SHDLC_CONNECTING: /* * We sent RSET, but chip wants to negotiate or we * got RSET before we managed to send out ours. */ if (skb->len > 0) w = skb->data[0]; if (skb->len > 1) srej_support = skb->data[1] & 0x01 ? true : false; if ((w <= SHDLC_MAX_WINDOW) && (SHDLC_SREJ_SUPPORT || (srej_support == false))) { shdlc->w = w; shdlc->srej_support = srej_support; r = llc_shdlc_connect_send_ua(shdlc); llc_shdlc_connect_complete(shdlc, r); } break; case SHDLC_HALF_CONNECTED: /* * Chip resent RSET due to its timeout - Ignore it * as we already sent UA. */ break; case SHDLC_CONNECTED: /* * Chip wants to reset link. This is unexpected and * unsupported.
*/ shdlc->hard_fault = -ECONNRESET; break; default: break; } break; case U_FRAME_UA: if ((shdlc->state == SHDLC_CONNECTING && shdlc->connect_tries > 0) || (shdlc->state == SHDLC_NEGOTIATING)) { llc_shdlc_connect_complete(shdlc, 0); shdlc->state = SHDLC_CONNECTED; } break; default: break; } kfree_skb(skb); } static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc) { struct sk_buff *skb; u8 control; int nr; int ns; enum sframe_type s_frame_type; enum uframe_modifier u_frame_modifier; if (shdlc->rcv_q.qlen) pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen); while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) { control = skb->data[0]; skb_pull(skb, 1); switch (control & SHDLC_CONTROL_HEAD_MASK) { case SHDLC_CONTROL_HEAD_I: case SHDLC_CONTROL_HEAD_I2: if (shdlc->state == SHDLC_HALF_CONNECTED) shdlc->state = SHDLC_CONNECTED; ns = (control & SHDLC_CONTROL_NS_MASK) >> 3; nr = control & SHDLC_CONTROL_NR_MASK; llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr); break; case SHDLC_CONTROL_HEAD_S: if (shdlc->state == SHDLC_HALF_CONNECTED) shdlc->state = SHDLC_CONNECTED; s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3; nr = control & SHDLC_CONTROL_NR_MASK; llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr); kfree_skb(skb); break; case SHDLC_CONTROL_HEAD_U: u_frame_modifier = control & SHDLC_CONTROL_M_MASK; llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier); break; default: pr_err("UNKNOWN Control=%d\n", control); kfree_skb(skb); break; } } } static int llc_shdlc_w_used(int ns, int dnr) { int unack_count; if (dnr <= ns) unack_count = ns - dnr; else unack_count = 8 - dnr + ns; return unack_count; } /* Send frames according to algorithm at spec:10.8.1 */ static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc) { struct sk_buff *skb; int r; unsigned long time_sent; if (shdlc->send_q.qlen) pr_debug("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n", shdlc->send_q.qlen, shdlc->ns, shdlc->dnr, shdlc->rnr == false ? 
"false" : "true", shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr), shdlc->ack_pending_q.qlen); while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w && (shdlc->rnr == false)) { if (shdlc->t1_active) { del_timer_sync(&shdlc->t1_timer); shdlc->t1_active = false; pr_debug("Stopped T1(send ack)\n"); } skb = skb_dequeue(&shdlc->send_q); *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) | shdlc->nr; pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns, shdlc->nr); SHDLC_DUMP_SKB("shdlc frame written", skb); r = shdlc->xmit_to_drv(shdlc->hdev, skb); if (r < 0) { shdlc->hard_fault = r; break; } shdlc->ns = (shdlc->ns + 1) % 8; time_sent = jiffies; *(unsigned long *)skb->cb = time_sent; skb_queue_tail(&shdlc->ack_pending_q, skb); if (shdlc->t2_active == false) { shdlc->t2_active = true; mod_timer(&shdlc->t2_timer, time_sent + msecs_to_jiffies(SHDLC_T2_VALUE_MS)); pr_debug("Started T2 (retransmit)\n"); } } } static void llc_shdlc_connect_timeout(struct timer_list *t) { struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer); schedule_work(&shdlc->sm_work); } static void llc_shdlc_t1_timeout(struct timer_list *t) { struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer); pr_debug("SoftIRQ: need to send ack\n"); schedule_work(&shdlc->sm_work); } static void llc_shdlc_t2_timeout(struct timer_list *t) { struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer); pr_debug("SoftIRQ: need to retransmit\n"); schedule_work(&shdlc->sm_work); } static void llc_shdlc_sm_work(struct work_struct *work) { struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work); int r; mutex_lock(&shdlc->state_mutex); switch (shdlc->state) { case SHDLC_DISCONNECTED: skb_queue_purge(&shdlc->rcv_q); skb_queue_purge(&shdlc->send_q); skb_queue_purge(&shdlc->ack_pending_q); break; case SHDLC_CONNECTING: if (shdlc->hard_fault) { llc_shdlc_connect_complete(shdlc, shdlc->hard_fault); break; } if (shdlc->connect_tries++ < 5) r = llc_shdlc_connect_initiate(shdlc); else r = -ETIME; if (r < 0) { llc_shdlc_connect_complete(shdlc, r); } else { mod_timer(&shdlc->connect_timer, jiffies + msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS)); shdlc->state = SHDLC_NEGOTIATING; } break; case SHDLC_NEGOTIATING: if (timer_pending(&shdlc->connect_timer) == 0) { shdlc->state = SHDLC_CONNECTING; schedule_work(&shdlc->sm_work); } llc_shdlc_handle_rcv_queue(shdlc); if (shdlc->hard_fault) { llc_shdlc_connect_complete(shdlc, shdlc->hard_fault); break; } break; case SHDLC_HALF_CONNECTED: case SHDLC_CONNECTED: llc_shdlc_handle_rcv_queue(shdlc); llc_shdlc_handle_send_queue(shdlc); if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) { pr_debug("Handle T1(send ack) elapsed (T1 now inactive)\n"); shdlc->t1_active = false; r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR, shdlc->nr); if (r < 0) shdlc->hard_fault = r; } if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) { pr_debug("Handle T2(retransmit) elapsed (T2 inactive)\n"); shdlc->t2_active = false; llc_shdlc_requeue_ack_pending(shdlc); llc_shdlc_handle_send_queue(shdlc); } if (shdlc->hard_fault) shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault); break; default: break; } mutex_unlock(&shdlc->state_mutex); } /* * Called from syscall context to establish shdlc link. Sleeps until * link is ready or failure. 
*/ static int llc_shdlc_connect(struct llc_shdlc *shdlc) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq); mutex_lock(&shdlc->state_mutex); shdlc->state = SHDLC_CONNECTING; shdlc->connect_wq = &connect_wq; shdlc->connect_tries = 0; shdlc->connect_result = 1; mutex_unlock(&shdlc->state_mutex); schedule_work(&shdlc->sm_work); wait_event(connect_wq, shdlc->connect_result != 1); return shdlc->connect_result; } static void llc_shdlc_disconnect(struct llc_shdlc *shdlc) { mutex_lock(&shdlc->state_mutex); shdlc->state = SHDLC_DISCONNECTED; mutex_unlock(&shdlc->state_mutex); schedule_work(&shdlc->sm_work); } /* * Receive an incoming shdlc frame. Frame has already been crc-validated. * skb contains only LLC header and payload. * If skb == NULL, it is a notification that the link below is dead. */ static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb) { if (skb == NULL) { pr_err("NULL Frame -> link is dead\n"); shdlc->hard_fault = -EREMOTEIO; } else { SHDLC_DUMP_SKB("incoming frame", skb); skb_queue_tail(&shdlc->rcv_q, skb); } schedule_work(&shdlc->sm_work); } static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv, rcv_to_hci_t rcv_to_hci, int tx_headroom, int tx_tailroom, int *rx_headroom, int *rx_tailroom, llc_failure_t llc_failure) { struct llc_shdlc *shdlc; *rx_headroom = SHDLC_LLC_HEAD_ROOM; *rx_tailroom = 0; shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL); if (shdlc == NULL) return NULL; mutex_init(&shdlc->state_mutex); shdlc->state = SHDLC_DISCONNECTED; timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0); timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0); timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0); shdlc->w = SHDLC_MAX_WINDOW; shdlc->srej_support = SHDLC_SREJ_SUPPORT; skb_queue_head_init(&shdlc->rcv_q); skb_queue_head_init(&shdlc->send_q); skb_queue_head_init(&shdlc->ack_pending_q); INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work); shdlc->hdev = hdev; shdlc->xmit_to_drv = xmit_to_drv; shdlc->rcv_to_hci = rcv_to_hci; shdlc->tx_headroom = tx_headroom; shdlc->tx_tailroom = tx_tailroom; shdlc->llc_failure = llc_failure; return shdlc; } static void llc_shdlc_deinit(struct nfc_llc *llc) { struct llc_shdlc *shdlc = nfc_llc_get_data(llc); skb_queue_purge(&shdlc->rcv_q); skb_queue_purge(&shdlc->send_q); skb_queue_purge(&shdlc->ack_pending_q); kfree(shdlc); } static int llc_shdlc_start(struct nfc_llc *llc) { struct llc_shdlc *shdlc = nfc_llc_get_data(llc); return llc_shdlc_connect(shdlc); } static int llc_shdlc_stop(struct nfc_llc *llc) { struct llc_shdlc *shdlc = nfc_llc_get_data(llc); llc_shdlc_disconnect(shdlc); return 0; } static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb) { struct llc_shdlc *shdlc = nfc_llc_get_data(llc); llc_shdlc_recv_frame(shdlc, skb); } static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb) { struct llc_shdlc *shdlc = nfc_llc_get_data(llc); skb_queue_tail(&shdlc->send_q, skb); schedule_work(&shdlc->sm_work); return 0; } static const struct nfc_llc_ops llc_shdlc_ops = { .init = llc_shdlc_init, .deinit = llc_shdlc_deinit, .start = llc_shdlc_start, .stop = llc_shdlc_stop, .rcv_from_drv = llc_shdlc_rcv_from_drv, .xmit_from_hci = llc_shdlc_xmit_from_hci, }; int nfc_llc_shdlc_register(void) { return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops); }
linux-master
net/nfc/hci/llc_shdlc.c
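The acknowledgement logic above depends on sequence numbers that wrap modulo 8 and on the SHDLC_CONTROL_* bit layout. The following stand-alone sketch (illustrative only, with made-up values) mirrors the control-byte decode from llc_shdlc_handle_rcv_queue() and the modulo-8 window test from llc_shdlc_x_lt_y_lteq_z().

/* Stand-alone illustration of the SHDLC control byte layout and the
 * modulo-8 window test used by llc_shdlc_rcv_i_frame()/rcv_ack().
 * Sample values are made up.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define CTRL_HEAD_I	0x80
#define CTRL_NS_MASK	0x38
#define CTRL_NR_MASK	0x07

/* checks x < y <= z modulo 8, as in llc_shdlc_x_lt_y_lteq_z() */
static bool x_lt_y_lteq_z(int x, int y, int z)
{
	if (x < z)
		return (x < y) && (y <= z);
	return (y > x) || (y <= z);
}

int main(void)
{
	uint8_t ctrl = CTRL_HEAD_I | (5 << 3) | 2;	/* I-frame, N(S)=5, N(R)=2 */
	int ns = (ctrl & CTRL_NS_MASK) >> 3;
	int nr = ctrl & CTRL_NR_MASK;

	printf("control=0x%02x ns=%d nr=%d\n", ctrl, ns, nr);

	/* With dnr=6 and ns=1 (wrapped), an ack for frame 7 is in window: */
	printf("dnr=6 < y=7 <= ns=1 (mod 8): %s\n",
	       x_lt_y_lteq_z(6, 7, 1) ? "true" : "false");
	return 0;
}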
// SPDX-License-Identifier: GPL-2.0-only /* * nop (passthrough) Link Layer Control * * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #include <linux/types.h> #include "llc.h" struct llc_nop { struct nfc_hci_dev *hdev; xmit_to_drv_t xmit_to_drv; rcv_to_hci_t rcv_to_hci; int tx_headroom; int tx_tailroom; llc_failure_t llc_failure; }; static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv, rcv_to_hci_t rcv_to_hci, int tx_headroom, int tx_tailroom, int *rx_headroom, int *rx_tailroom, llc_failure_t llc_failure) { struct llc_nop *llc_nop; *rx_headroom = 0; *rx_tailroom = 0; llc_nop = kzalloc(sizeof(struct llc_nop), GFP_KERNEL); if (llc_nop == NULL) return NULL; llc_nop->hdev = hdev; llc_nop->xmit_to_drv = xmit_to_drv; llc_nop->rcv_to_hci = rcv_to_hci; llc_nop->tx_headroom = tx_headroom; llc_nop->tx_tailroom = tx_tailroom; llc_nop->llc_failure = llc_failure; return llc_nop; } static void llc_nop_deinit(struct nfc_llc *llc) { kfree(nfc_llc_get_data(llc)); } static int llc_nop_start(struct nfc_llc *llc) { return 0; } static int llc_nop_stop(struct nfc_llc *llc) { return 0; } static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb) { struct llc_nop *llc_nop = nfc_llc_get_data(llc); llc_nop->rcv_to_hci(llc_nop->hdev, skb); } static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb) { struct llc_nop *llc_nop = nfc_llc_get_data(llc); return llc_nop->xmit_to_drv(llc_nop->hdev, skb); } static const struct nfc_llc_ops llc_nop_ops = { .init = llc_nop_init, .deinit = llc_nop_deinit, .start = llc_nop_start, .stop = llc_nop_stop, .rcv_from_drv = llc_nop_rcv_from_drv, .xmit_from_hci = llc_nop_xmit_from_hci, }; int nfc_llc_nop_register(void) { return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops); }
linux-master
net/nfc/hci/llc_nop.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "hci: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <net/nfc/hci.h> #include "hci.h" /* * Payload is the HCP message data only. Instruction will be prepended. * Guarantees that cb will be called upon completion or timeout delay * counted from the moment the cmd is sent to the transport. */ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, u8 instruction, const u8 *payload, size_t payload_len, data_exchange_cb_t cb, void *cb_context, unsigned long completion_delay) { struct nfc_dev *ndev = hdev->ndev; struct hci_msg *cmd; const u8 *ptr = payload; int hci_len, err; bool firstfrag = true; cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL); if (cmd == NULL) return -ENOMEM; INIT_LIST_HEAD(&cmd->msg_l); skb_queue_head_init(&cmd->msg_frags); cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false; cmd->cb = cb; cmd->cb_context = cb_context; cmd->completion_delay = completion_delay; hci_len = payload_len + 1; while (hci_len > 0) { struct sk_buff *skb; int skb_len, data_link_len; struct hcp_packet *packet; if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <= hdev->max_data_link_payload) data_link_len = hci_len; else data_link_len = hdev->max_data_link_payload - NFC_HCI_HCP_PACKET_HEADER_LEN; skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len + ndev->tx_tailroom; hci_len -= data_link_len; skb = alloc_skb(skb_len, GFP_KERNEL); if (skb == NULL) { err = -ENOMEM; goto out_skb_err; } skb_reserve(skb, ndev->tx_headroom); skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len); /* Only the last fragment will have the cb bit set to 1 */ packet = (struct hcp_packet *)skb->data; packet->header = pipe; if (firstfrag) { firstfrag = false; packet->message.header = HCP_HEADER(type, instruction); } else { packet->message.header = *ptr++; } if (ptr) { memcpy(packet->message.data, ptr, data_link_len - 1); ptr += data_link_len - 1; } /* This is the last fragment, set the cb bit */ if (hci_len == 0) packet->header |= ~NFC_HCI_FRAGMENT; skb_queue_tail(&cmd->msg_frags, skb); } mutex_lock(&hdev->msg_tx_mutex); if (hdev->shutting_down) { err = -ESHUTDOWN; mutex_unlock(&hdev->msg_tx_mutex); goto out_skb_err; } list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue); mutex_unlock(&hdev->msg_tx_mutex); schedule_work(&hdev->msg_tx_work); return 0; out_skb_err: skb_queue_purge(&cmd->msg_frags); kfree(cmd); return err; } /* * Receive hcp message for pipe, with type and cmd. * skb contains optional message data only. */ void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type, u8 instruction, struct sk_buff *skb) { switch (type) { case NFC_HCI_HCP_RESPONSE: nfc_hci_resp_received(hdev, instruction, skb); break; case NFC_HCI_HCP_COMMAND: nfc_hci_cmd_received(hdev, pipe, instruction, skb); break; case NFC_HCI_HCP_EVENT: nfc_hci_event_received(hdev, pipe, instruction, skb); break; default: pr_err("UNKNOWN MSG Type %d, instruction=%d\n", type, instruction); kfree_skb(skb); break; } }
linux-master
net/nfc/hci/hcp.c
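nfc_hci_hcp_message_tx() above prepends a one-byte HCP message header (type in the top two bits, instruction in the low six) and then slices the message into fragments bounded by the data-link payload size. The sketch below is a simplified user-space illustration of that slicing only; it ignores the per-fragment pipe byte and all skb plumbing, and the 10-byte link limit and payload are assumptions.

/* Stand-alone illustration of HCP message fragmentation as done by
 * nfc_hci_hcp_message_tx(): the first fragment carries the message
 * header byte (type << 6 | instruction), later fragments carry only
 * payload. The link payload limit and data are arbitrary.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_LINK_PAYLOAD 10	/* assumed hdev->max_data_link_payload */

static void hcp_fragment(uint8_t type, uint8_t instruction,
			 const uint8_t *payload, size_t payload_len)
{
	uint8_t msg[64];
	size_t msg_len = 0, off = 0;

	if (payload_len + 1 > sizeof(msg))
		return;

	/* Build the HCP message: header byte, then the payload. */
	msg[msg_len++] = (uint8_t)((type << 6) | (instruction & 0x3f));
	memcpy(msg + msg_len, payload, payload_len);
	msg_len += payload_len;

	while (off < msg_len) {
		size_t chunk = msg_len - off;

		if (chunk > MAX_LINK_PAYLOAD)
			chunk = MAX_LINK_PAYLOAD;
		printf("fragment: %zu byte(s)%s\n", chunk,
		       off + chunk == msg_len ? " (last)" : "");
		off += chunk;
	}
}

int main(void)
{
	uint8_t param[15] = { 0 };

	hcp_fragment(0x00 /* command */, 0x01 /* arbitrary instruction */,
		     param, sizeof(param));
	return 0;
}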
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "hci: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <net/nfc/hci.h> #include "hci.h" #define MAX_FWI 4949 static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, const u8 *param, size_t param_len, data_exchange_cb_t cb, void *cb_context) { pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); /* TODO: Define hci cmd execution delay. Should it be the same * for all commands? */ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd, param, param_len, cb, cb_context, MAX_FWI); } /* * HCI command execution completion callback. * err will be a standard linux error (may be converted from HCI response) * skb contains the response data and must be disposed, or may be NULL if * an error occurred */ static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err) { struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context; pr_debug("HCI Cmd completed with result=%d\n", err); hcp_ew->exec_result = err; if (hcp_ew->exec_result == 0) hcp_ew->result_skb = skb; else kfree_skb(skb); hcp_ew->exec_complete = true; wake_up(hcp_ew->wq); } static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq); struct hcp_exec_waiter hcp_ew; hcp_ew.wq = &ew_wq; hcp_ew.exec_complete = false; hcp_ew.result_skb = NULL; pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len); /* TODO: Define hci cmd execution delay. Should it be the same * for all commands? */ hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd, param, param_len, nfc_hci_execute_cb, &hcp_ew, MAX_FWI); if (hcp_ew.exec_result < 0) return hcp_ew.exec_result; wait_event(ew_wq, hcp_ew.exec_complete == true); if (hcp_ew.exec_result == 0) { if (skb) *skb = hcp_ew.result_skb; else kfree_skb(hcp_ew.result_skb); } return hcp_ew.exec_result; } int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, const u8 *param, size_t param_len) { u8 pipe; pr_debug("%d to gate %d\n", event, gate); pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event, param, param_len, NULL, NULL, 0); } EXPORT_SYMBOL(nfc_hci_send_event); /* * Execute an hci command sent to gate. * skb will contain response data if success. skb can be NULL if you are not * interested by the response. 
*/ int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { u8 pipe; pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb); } EXPORT_SYMBOL(nfc_hci_send_cmd); int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, const u8 *param, size_t param_len, data_exchange_cb_t cb, void *cb_context) { u8 pipe; pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len, cb, cb_context); } EXPORT_SYMBOL(nfc_hci_send_cmd_async); int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, const u8 *param, size_t param_len) { int r; u8 *tmp; /* TODO ELa: reg idx must be inserted before param, but we don't want * to ask the caller to do it to keep a simpler API. * For now, just create a new temporary param buffer. This is far from * optimal though, and the plan is to modify APIs to pass idx down to * nfc_hci_hcp_message_tx where the frame is actually built, thereby * eliminating the need for the temp allocation-copy here. */ pr_debug("idx=%d to gate %d\n", idx, gate); tmp = kmalloc(1 + param_len, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; *tmp = idx; memcpy(tmp + 1, param, param_len); r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER, tmp, param_len + 1, NULL); kfree(tmp); return r; } EXPORT_SYMBOL(nfc_hci_set_param); int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, struct sk_buff **skb) { pr_debug("gate=%d regidx=%d\n", gate, idx); return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER, &idx, 1, skb); } EXPORT_SYMBOL(nfc_hci_get_param); static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe) { struct sk_buff *skb; int r; pr_debug("pipe=%d\n", pipe); r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE, NULL, 0, &skb); if (r == 0) { /* dest host other than host controller will send * number of pipes already open on this gate before * execution. The number can be found in skb->data[0] */ kfree_skb(skb); } return r; } static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe) { return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE, NULL, 0, NULL); } static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, int *result) { struct sk_buff *skb; struct hci_create_pipe_params params; struct hci_create_pipe_resp *resp; u8 pipe; pr_debug("gate=%d\n", dest_gate); params.src_gate = NFC_HCI_ADMIN_GATE; params.dest_host = dest_host; params.dest_gate = dest_gate; *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_CREATE_PIPE, (u8 *) &params, sizeof(params), &skb); if (*result < 0) return NFC_HCI_INVALID_PIPE; resp = (struct hci_create_pipe_resp *)skb->data; pipe = resp->pipe; kfree_skb(skb); pr_debug("pipe created=%d\n", pipe); return pipe; } static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe) { return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev) { u8 param[2]; size_t param_len = 2; /* TODO: Find out what the identity reference data is * and fill param with it. 
HCI spec 6.1.3.5 */ if (test_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &hdev->quirks)) param_len = 0; return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE, NFC_HCI_ADM_CLEAR_ALL_PIPE, param, param_len, NULL); } int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate) { int r; u8 pipe = hdev->gate2pipe[gate]; if (pipe == NFC_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; r = nfc_hci_close_pipe(hdev, pipe); if (r < 0) return r; if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) { r = nfc_hci_delete_pipe(hdev, pipe); if (r < 0) return r; } hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE; return 0; } EXPORT_SYMBOL(nfc_hci_disconnect_gate); int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev) { int r; r = nfc_hci_clear_all_pipes(hdev); if (r < 0) return r; nfc_hci_reset_pipes(hdev); return 0; } EXPORT_SYMBOL(nfc_hci_disconnect_all_gates); int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate, u8 pipe) { bool pipe_created = false; int r; if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE) return 0; if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE) return -EADDRINUSE; if (pipe != NFC_HCI_INVALID_PIPE) goto open_pipe; switch (dest_gate) { case NFC_HCI_LINK_MGMT_GATE: pipe = NFC_HCI_LINK_MGMT_PIPE; break; case NFC_HCI_ADMIN_GATE: pipe = NFC_HCI_ADMIN_PIPE; break; default: pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r); if (pipe == NFC_HCI_INVALID_PIPE) return r; pipe_created = true; break; } open_pipe: r = nfc_hci_open_pipe(hdev, pipe); if (r < 0) { if (pipe_created) if (nfc_hci_delete_pipe(hdev, pipe) < 0) { /* TODO: Cannot clean by deleting pipe... * -> inconsistent state */ } return r; } hdev->pipes[pipe].gate = dest_gate; hdev->pipes[pipe].dest_host = dest_host; hdev->gate2pipe[dest_gate] = pipe; return 0; } EXPORT_SYMBOL(nfc_hci_connect_gate);
linux-master
net/nfc/hci/command.c
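nfc_hci_set_param() above has to prepend the registry index to the parameter value before calling nfc_hci_send_cmd(), hence the temporary buffer. A tiny stand-alone sketch of that payload layout (index byte followed by the value bytes), with made-up index and value:

/* Stand-alone illustration of the ANY_SET_PARAMETER payload built by
 * nfc_hci_set_param(): one registry-index byte followed by the value.
 * The index and value below are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static uint8_t *build_set_param(uint8_t idx, const uint8_t *param,
				size_t param_len, size_t *out_len)
{
	uint8_t *buf = malloc(1 + param_len);

	if (!buf)
		return NULL;
	buf[0] = idx;			/* registry index comes first */
	memcpy(buf + 1, param, param_len);
	*out_len = 1 + param_len;
	return buf;
}

int main(void)
{
	const uint8_t session_id[] = { 'd', 'e', 'm', 'o' };
	size_t len, i;
	uint8_t *payload = build_set_param(0x01, session_id,
					   sizeof(session_id), &len);

	if (!payload)
		return 1;
	for (i = 0; i < len; i++)
		printf("%02x ", payload[i]);
	printf("\n");
	free(payload);
	return 0;
}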
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "hci: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "hci.h" /* Largest headroom needed for outgoing HCI commands */ #define HCI_CMDS_HEADROOM 1 int nfc_hci_result_to_errno(u8 result) { switch (result) { case NFC_HCI_ANY_OK: return 0; case NFC_HCI_ANY_E_REG_PAR_UNKNOWN: return -EOPNOTSUPP; case NFC_HCI_ANY_E_TIMEOUT: return -ETIME; default: return -1; } } EXPORT_SYMBOL(nfc_hci_result_to_errno); void nfc_hci_reset_pipes(struct nfc_hci_dev *hdev) { int i = 0; for (i = 0; i < NFC_HCI_MAX_PIPES; i++) { hdev->pipes[i].gate = NFC_HCI_INVALID_GATE; hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST; } memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); } EXPORT_SYMBOL(nfc_hci_reset_pipes); void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host) { int i = 0; for (i = 0; i < NFC_HCI_MAX_PIPES; i++) { if (hdev->pipes[i].dest_host != host) continue; hdev->pipes[i].gate = NFC_HCI_INVALID_GATE; hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST; } } EXPORT_SYMBOL(nfc_hci_reset_pipes_per_host); static void nfc_hci_msg_tx_work(struct work_struct *work) { struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, msg_tx_work); struct hci_msg *msg; struct sk_buff *skb; int r = 0; mutex_lock(&hdev->msg_tx_mutex); if (hdev->shutting_down) goto exit; if (hdev->cmd_pending_msg) { if (timer_pending(&hdev->cmd_timer) == 0) { if (hdev->cmd_pending_msg->cb) hdev->cmd_pending_msg->cb(hdev-> cmd_pending_msg-> cb_context, NULL, -ETIME); kfree(hdev->cmd_pending_msg); hdev->cmd_pending_msg = NULL; } else { goto exit; } } next_msg: if (list_empty(&hdev->msg_tx_queue)) goto exit; msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l); list_del(&msg->msg_l); pr_debug("msg_tx_queue has a cmd to send\n"); while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) { r = nfc_llc_xmit_from_hci(hdev->llc, skb); if (r < 0) { kfree_skb(skb); skb_queue_purge(&msg->msg_frags); if (msg->cb) msg->cb(msg->cb_context, NULL, r); kfree(msg); break; } } if (r) goto next_msg; if (msg->wait_response == false) { kfree(msg); goto next_msg; } hdev->cmd_pending_msg = msg; mod_timer(&hdev->cmd_timer, jiffies + msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay)); exit: mutex_unlock(&hdev->msg_tx_mutex); } static void nfc_hci_msg_rx_work(struct work_struct *work) { struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev, msg_rx_work); struct sk_buff *skb; const struct hcp_message *message; u8 pipe; u8 type; u8 instruction; while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) { pipe = skb->data[0]; skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN); message = (struct hcp_message *)skb->data; type = HCP_MSG_GET_TYPE(message->header); instruction = HCP_MSG_GET_CMD(message->header); skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN); nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb); } } static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err, struct sk_buff *skb) { del_timer_sync(&hdev->cmd_timer); if (hdev->cmd_pending_msg->cb) hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context, skb, err); else kfree_skb(skb); kfree(hdev->cmd_pending_msg); hdev->cmd_pending_msg = NULL; schedule_work(&hdev->msg_tx_work); } void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, struct sk_buff 
*skb) { mutex_lock(&hdev->msg_tx_mutex); if (hdev->cmd_pending_msg == NULL) { kfree_skb(skb); goto exit; } __nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb); exit: mutex_unlock(&hdev->msg_tx_mutex); } void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, struct sk_buff *skb) { u8 status = NFC_HCI_ANY_OK; const struct hci_create_pipe_resp *create_info; const struct hci_delete_pipe_noti *delete_info; const struct hci_all_pipe_cleared_noti *cleared_info; u8 gate; pr_debug("from pipe %x cmd %x\n", pipe, cmd); if (pipe >= NFC_HCI_MAX_PIPES) { status = NFC_HCI_ANY_E_NOK; goto exit; } gate = hdev->pipes[pipe].gate; switch (cmd) { case NFC_HCI_ADM_NOTIFY_PIPE_CREATED: if (skb->len != 5) { status = NFC_HCI_ANY_E_NOK; goto exit; } create_info = (struct hci_create_pipe_resp *)skb->data; if (create_info->pipe >= NFC_HCI_MAX_PIPES) { status = NFC_HCI_ANY_E_NOK; goto exit; } /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we * are the destination and it is our local gate */ hdev->gate2pipe[create_info->dest_gate] = create_info->pipe; hdev->pipes[create_info->pipe].gate = create_info->dest_gate; hdev->pipes[create_info->pipe].dest_host = create_info->src_host; break; case NFC_HCI_ANY_OPEN_PIPE: if (gate == NFC_HCI_INVALID_GATE) { status = NFC_HCI_ANY_E_NOK; goto exit; } break; case NFC_HCI_ADM_NOTIFY_PIPE_DELETED: if (skb->len != 1) { status = NFC_HCI_ANY_E_NOK; goto exit; } delete_info = (struct hci_delete_pipe_noti *)skb->data; if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { status = NFC_HCI_ANY_E_NOK; goto exit; } hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; break; case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED: if (skb->len != 1) { status = NFC_HCI_ANY_E_NOK; goto exit; } cleared_info = (struct hci_all_pipe_cleared_noti *)skb->data; nfc_hci_reset_pipes_per_host(hdev, cleared_info->host); break; default: pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate); break; } if (hdev->ops->cmd_received) hdev->ops->cmd_received(hdev, pipe, cmd, skb); exit: nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE, status, NULL, 0, NULL, NULL, 0); kfree_skb(skb); } u32 nfc_hci_sak_to_protocol(u8 sak) { switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) { case NFC_HCI_TYPE_A_SEL_PROT_MIFARE: return NFC_PROTO_MIFARE_MASK; case NFC_HCI_TYPE_A_SEL_PROT_ISO14443: return NFC_PROTO_ISO14443_MASK; case NFC_HCI_TYPE_A_SEL_PROT_DEP: return NFC_PROTO_NFC_DEP_MASK; case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP: return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK; default: return 0xffffffff; } } EXPORT_SYMBOL(nfc_hci_sak_to_protocol); int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate) { struct nfc_target *targets; struct sk_buff *atqa_skb = NULL; struct sk_buff *sak_skb = NULL; struct sk_buff *uid_skb = NULL; int r; pr_debug("from gate %d\n", gate); targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); if (targets == NULL) return -ENOMEM; switch (gate) { case NFC_HCI_RF_READER_A_GATE: r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_RF_READER_A_ATQA, &atqa_skb); if (r < 0) goto exit; r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_RF_READER_A_SAK, &sak_skb); if (r < 0) goto exit; if (atqa_skb->len != 2 || sak_skb->len != 1) { r = -EPROTO; goto exit; } targets->supported_protocols = nfc_hci_sak_to_protocol(sak_skb->data[0]); if (targets->supported_protocols == 
0xffffffff) { r = -EPROTO; goto exit; } targets->sens_res = be16_to_cpu(*(__be16 *)atqa_skb->data); targets->sel_res = sak_skb->data[0]; r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_RF_READER_A_UID, &uid_skb); if (r < 0) goto exit; if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) { r = -EPROTO; goto exit; } memcpy(targets->nfcid1, uid_skb->data, uid_skb->len); targets->nfcid1_len = uid_skb->len; if (hdev->ops->complete_target_discovered) { r = hdev->ops->complete_target_discovered(hdev, gate, targets); if (r < 0) goto exit; } break; case NFC_HCI_RF_READER_B_GATE: targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; break; default: if (hdev->ops->target_from_gate) r = hdev->ops->target_from_gate(hdev, gate, targets); else r = -EPROTO; if (r < 0) goto exit; if (hdev->ops->complete_target_discovered) { r = hdev->ops->complete_target_discovered(hdev, gate, targets); if (r < 0) goto exit; } break; } /* if driver set the new gate, we will skip the old one */ if (targets->hci_reader_gate == 0x00) targets->hci_reader_gate = gate; r = nfc_targets_found(hdev->ndev, targets, 1); exit: kfree(targets); kfree_skb(atqa_skb); kfree_skb(sak_skb); kfree_skb(uid_skb); return r; } EXPORT_SYMBOL(nfc_hci_target_discovered); void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { int r = 0; u8 gate; if (pipe >= NFC_HCI_MAX_PIPES) { pr_err("Discarded event %x to invalid pipe %x\n", event, pipe); goto exit; } gate = hdev->pipes[pipe].gate; if (gate == NFC_HCI_INVALID_GATE) { pr_err("Discarded event %x to unopened pipe %x\n", event, pipe); goto exit; } if (hdev->ops->event_received) { r = hdev->ops->event_received(hdev, pipe, event, skb); if (r <= 0) goto exit_noskb; } switch (event) { case NFC_HCI_EVT_TARGET_DISCOVERED: if (skb->len < 1) { /* no status data? */ r = -EPROTO; goto exit; } if (skb->data[0] == 3) { /* TODO: Multiple targets in field, none activated * poll is supposedly stopped, but there is no * single target to activate, so nothing to report * up. * if we need to restart poll, we must save the * protocols from the initial poll and reuse here. */ } if (skb->data[0] != 0) { r = -EPROTO; goto exit; } r = nfc_hci_target_discovered(hdev, gate); break; default: pr_info("Discarded unknown event %x to gate %x\n", event, gate); r = -EINVAL; break; } exit: kfree_skb(skb); exit_noskb: if (r) nfc_hci_driver_failure(hdev, r); } static void nfc_hci_cmd_timeout(struct timer_list *t) { struct nfc_hci_dev *hdev = from_timer(hdev, t, cmd_timer); schedule_work(&hdev->msg_tx_work); } static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count, const struct nfc_hci_gate *gates) { int r; while (gate_count--) { r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, gates->gate, gates->pipe); if (r < 0) return r; gates++; } return 0; } static int hci_dev_session_init(struct nfc_hci_dev *hdev) { struct sk_buff *skb = NULL; int r; if (hdev->init_data.gates[0].gate != NFC_HCI_ADMIN_GATE) return -EPROTO; r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, hdev->init_data.gates[0].gate, hdev->init_data.gates[0].pipe); if (r < 0) goto exit; r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_SESSION_IDENTITY, &skb); if (r < 0) goto disconnect_all; if (skb->len && skb->len == strlen(hdev->init_data.session_id) && (memcmp(hdev->init_data.session_id, skb->data, skb->len) == 0) && hdev->ops->load_session) { /* Restore gate<->pipe table from some proprietary location. 
*/ r = hdev->ops->load_session(hdev); if (r < 0) goto disconnect_all; } else { r = nfc_hci_disconnect_all_gates(hdev); if (r < 0) goto exit; r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count, hdev->init_data.gates); if (r < 0) goto disconnect_all; r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_SESSION_IDENTITY, hdev->init_data.session_id, strlen(hdev->init_data.session_id)); } if (r == 0) goto exit; disconnect_all: nfc_hci_disconnect_all_gates(hdev); exit: kfree_skb(skb); return r; } static int hci_dev_version(struct nfc_hci_dev *hdev) { int r; struct sk_buff *skb; r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, NFC_HCI_ID_MGMT_VERSION_SW, &skb); if (r == -EOPNOTSUPP) { pr_info("Software/Hardware info not available\n"); return 0; } if (r < 0) return r; if (skb->len != 3) { kfree_skb(skb); return -EINVAL; } hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4; hdev->sw_patch = skb->data[0] & 0x0f; hdev->sw_flashlib_major = skb->data[1]; hdev->sw_flashlib_minor = skb->data[2]; kfree_skb(skb); r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, NFC_HCI_ID_MGMT_VERSION_HW, &skb); if (r < 0) return r; if (skb->len != 3) { kfree_skb(skb); return -EINVAL; } hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5; hdev->hw_version = skb->data[0] & 0x1f; hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6; hdev->hw_software = skb->data[1] & 0x3f; hdev->hw_bsid = skb->data[2]; kfree_skb(skb); pr_info("SOFTWARE INFO:\n"); pr_info("RomLib : %d\n", hdev->sw_romlib); pr_info("Patch : %d\n", hdev->sw_patch); pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major); pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor); pr_info("HARDWARE INFO:\n"); pr_info("Derivative : %d\n", hdev->hw_derivative); pr_info("HW Version : %d\n", hdev->hw_version); pr_info("#MPW : %d\n", hdev->hw_mpw); pr_info("Software : %d\n", hdev->hw_software); pr_info("BSID Version : %d\n", hdev->hw_bsid); return 0; } static int hci_dev_up(struct nfc_dev *nfc_dev) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); int r = 0; if (hdev->ops->open) { r = hdev->ops->open(hdev); if (r < 0) return r; } r = nfc_llc_start(hdev->llc); if (r < 0) goto exit_close; r = hci_dev_session_init(hdev); if (r < 0) goto exit_llc; r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) goto exit_llc; if (hdev->ops->hci_ready) { r = hdev->ops->hci_ready(hdev); if (r < 0) goto exit_llc; } r = hci_dev_version(hdev); if (r < 0) goto exit_llc; return 0; exit_llc: nfc_llc_stop(hdev->llc); exit_close: if (hdev->ops->close) hdev->ops->close(hdev); return r; } static int hci_dev_down(struct nfc_dev *nfc_dev) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); nfc_llc_stop(hdev->llc); if (hdev->ops->close) hdev->ops->close(hdev); nfc_hci_reset_pipes(hdev); return 0; } static int hci_start_poll(struct nfc_dev *nfc_dev, u32 im_protocols, u32 tm_protocols) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->start_poll) return hdev->ops->start_poll(hdev, im_protocols, tm_protocols); else return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_READER_REQUESTED, NULL, 0); } static void hci_stop_poll(struct nfc_dev *nfc_dev) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->stop_poll) hdev->ops->stop_poll(hdev); else nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); } static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, __u8 comm_mode, __u8 *gb, size_t gb_len) { struct nfc_hci_dev *hdev = 
nfc_get_drvdata(nfc_dev); if (!hdev->ops->dep_link_up) return 0; return hdev->ops->dep_link_up(hdev, target, comm_mode, gb, gb_len); } static int hci_dep_link_down(struct nfc_dev *nfc_dev) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (!hdev->ops->dep_link_down) return 0; return hdev->ops->dep_link_down(hdev); } static int hci_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u32 protocol) { return 0; } static void hci_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) { } #define HCI_CB_TYPE_TRANSCEIVE 1 static void hci_transceive_cb(void *context, struct sk_buff *skb, int err) { struct nfc_hci_dev *hdev = context; switch (hdev->async_cb_type) { case HCI_CB_TYPE_TRANSCEIVE: /* * TODO: Check RF Error indicator to make sure data is valid. * It seems that HCI cmd can complete without error, but data * can be invalid if an RF error occurred? Ignore for now. */ if (err == 0) skb_trim(skb, skb->len - 1); /* RF Err ind */ hdev->async_cb(hdev->async_cb_context, skb, err); break; default: if (err == 0) kfree_skb(skb); break; } } static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); int r; pr_debug("target_idx=%d\n", target->idx); switch (target->hci_reader_gate) { case NFC_HCI_RF_READER_A_GATE: case NFC_HCI_RF_READER_B_GATE: if (hdev->ops->im_transceive) { r = hdev->ops->im_transceive(hdev, target, skb, cb, cb_context); if (r <= 0) /* handled */ break; } *(u8 *)skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */ hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE; hdev->async_cb = cb; hdev->async_cb_context = cb_context; r = nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, NFC_HCI_WR_XCHG_DATA, skb->data, skb->len, hci_transceive_cb, hdev); break; default: if (hdev->ops->im_transceive) { r = hdev->ops->im_transceive(hdev, target, skb, cb, cb_context); if (r == 1) r = -ENOTSUPP; } else { r = -ENOTSUPP; } break; } kfree_skb(skb); return r; } static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (!hdev->ops->tm_send) { kfree_skb(skb); return -ENOTSUPP; } return hdev->ops->tm_send(hdev, skb); } static int hci_check_presence(struct nfc_dev *nfc_dev, struct nfc_target *target) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (!hdev->ops->check_presence) return 0; return hdev->ops->check_presence(hdev, target); } static int hci_discover_se(struct nfc_dev *nfc_dev) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->discover_se) return hdev->ops->discover_se(hdev); return 0; } static int hci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->enable_se) return hdev->ops->enable_se(hdev, se_idx); return 0; } static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->disable_se) return hdev->ops->disable_se(hdev, se_idx); return 0; } static int hci_se_io(struct nfc_dev *nfc_dev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (hdev->ops->se_io) return hdev->ops->se_io(hdev, se_idx, apdu, apdu_length, cb, cb_context); return 0; } static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err) { mutex_lock(&hdev->msg_tx_mutex); if (hdev->cmd_pending_msg == NULL) { nfc_driver_failure(hdev->ndev, err); 
goto exit; } __nfc_hci_cmd_completion(hdev, err, NULL); exit: mutex_unlock(&hdev->msg_tx_mutex); } static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err) { nfc_hci_failure(hdev, err); } static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct hcp_packet *packet; u8 type; u8 instruction; struct sk_buff *hcp_skb; u8 pipe; struct sk_buff *frag_skb; int msg_len; packet = (struct hcp_packet *)skb->data; if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) { skb_queue_tail(&hdev->rx_hcp_frags, skb); return; } /* it's the last fragment. Does it need re-aggregation? */ if (skb_queue_len(&hdev->rx_hcp_frags)) { pipe = packet->header & NFC_HCI_FRAGMENT; skb_queue_tail(&hdev->rx_hcp_frags, skb); msg_len = 0; skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { msg_len += (frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN); } hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN + msg_len, GFP_KERNEL); if (hcp_skb == NULL) { nfc_hci_failure(hdev, -ENOMEM); return; } skb_put_u8(hcp_skb, pipe); skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) { msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN; skb_put_data(hcp_skb, frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN, msg_len); } skb_queue_purge(&hdev->rx_hcp_frags); } else { packet->header &= NFC_HCI_FRAGMENT; hcp_skb = skb; } /* if this is a response, dispatch immediately to * unblock waiting cmd context. Otherwise, enqueue to dispatch * in separate context where handler can also execute command. */ packet = (struct hcp_packet *)hcp_skb->data; type = HCP_MSG_GET_TYPE(packet->message.header); if (type == NFC_HCI_HCP_RESPONSE) { pipe = packet->header; instruction = HCP_MSG_GET_CMD(packet->message.header); skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN + NFC_HCI_HCP_MESSAGE_HEADER_LEN); nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb); } else { skb_queue_tail(&hdev->msg_rx_queue, hcp_skb); schedule_work(&hdev->msg_rx_work); } } static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name) { struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev); if (!hdev->ops->fw_download) return -ENOTSUPP; return hdev->ops->fw_download(hdev, firmware_name); } static const struct nfc_ops hci_nfc_ops = { .dev_up = hci_dev_up, .dev_down = hci_dev_down, .start_poll = hci_start_poll, .stop_poll = hci_stop_poll, .dep_link_up = hci_dep_link_up, .dep_link_down = hci_dep_link_down, .activate_target = hci_activate_target, .deactivate_target = hci_deactivate_target, .im_transceive = hci_transceive, .tm_send = hci_tm_send, .check_presence = hci_check_presence, .fw_download = hci_fw_download, .discover_se = hci_discover_se, .enable_se = hci_enable_se, .disable_se = hci_disable_se, .se_io = hci_se_io, }; struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops, struct nfc_hci_init_data *init_data, unsigned long quirks, u32 protocols, const char *llc_name, int tx_headroom, int tx_tailroom, int max_link_payload) { struct nfc_hci_dev *hdev; if (ops->xmit == NULL) return NULL; if (protocols == 0) return NULL; hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL); if (hdev == NULL) return NULL; hdev->llc = nfc_llc_allocate(llc_name, hdev, ops->xmit, nfc_hci_recv_from_llc, tx_headroom, tx_tailroom, nfc_hci_llc_failure); if (hdev->llc == NULL) { kfree(hdev); return NULL; } hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, tx_headroom + HCI_CMDS_HEADROOM, tx_tailroom); if (!hdev->ndev) { nfc_llc_free(hdev->llc); kfree(hdev); return NULL; } hdev->ops = ops; hdev->max_data_link_payload = max_link_payload; 
hdev->init_data = *init_data; nfc_set_drvdata(hdev->ndev, hdev); nfc_hci_reset_pipes(hdev); hdev->quirks = quirks; return hdev; } EXPORT_SYMBOL(nfc_hci_allocate_device); void nfc_hci_free_device(struct nfc_hci_dev *hdev) { nfc_free_device(hdev->ndev); nfc_llc_free(hdev->llc); kfree(hdev); } EXPORT_SYMBOL(nfc_hci_free_device); int nfc_hci_register_device(struct nfc_hci_dev *hdev) { mutex_init(&hdev->msg_tx_mutex); INIT_LIST_HEAD(&hdev->msg_tx_queue); INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work); timer_setup(&hdev->cmd_timer, nfc_hci_cmd_timeout, 0); skb_queue_head_init(&hdev->rx_hcp_frags); INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work); skb_queue_head_init(&hdev->msg_rx_queue); return nfc_register_device(hdev->ndev); } EXPORT_SYMBOL(nfc_hci_register_device); void nfc_hci_unregister_device(struct nfc_hci_dev *hdev) { struct hci_msg *msg, *n; mutex_lock(&hdev->msg_tx_mutex); if (hdev->cmd_pending_msg) { if (hdev->cmd_pending_msg->cb) hdev->cmd_pending_msg->cb( hdev->cmd_pending_msg->cb_context, NULL, -ESHUTDOWN); kfree(hdev->cmd_pending_msg); hdev->cmd_pending_msg = NULL; } hdev->shutting_down = true; mutex_unlock(&hdev->msg_tx_mutex); del_timer_sync(&hdev->cmd_timer); cancel_work_sync(&hdev->msg_tx_work); cancel_work_sync(&hdev->msg_rx_work); nfc_unregister_device(hdev->ndev); skb_queue_purge(&hdev->rx_hcp_frags); skb_queue_purge(&hdev->msg_rx_queue); list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) { list_del(&msg->msg_l); skb_queue_purge(&msg->msg_frags); kfree(msg); } } EXPORT_SYMBOL(nfc_hci_unregister_device); void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata) { hdev->clientdata = clientdata; } EXPORT_SYMBOL(nfc_hci_set_clientdata); void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev) { return hdev->clientdata; } EXPORT_SYMBOL(nfc_hci_get_clientdata); void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err) { nfc_hci_failure(hdev, err); } EXPORT_SYMBOL(nfc_hci_driver_failure); void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb) { nfc_llc_rcv_from_drv(hdev->llc, skb); } EXPORT_SYMBOL(nfc_hci_recv_frame); static int __init nfc_hci_init(void) { return nfc_llc_init(); } static void __exit nfc_hci_exit(void) { nfc_llc_exit(); } subsys_initcall(nfc_hci_init); module_exit(nfc_hci_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFC HCI Core");
linux-master
net/nfc/hci/core.c
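The HCI core above exposes a small driver-facing lifetime API (nfc_hci_allocate_device(), nfc_hci_register_device(), nfc_hci_recv_frame(), nfc_hci_unregister_device(), nfc_hci_free_device()). Below is a minimal sketch of how a driver might wire those calls together; every example_* identifier, the "shdlc" LLC name and the numeric headroom/payload values are illustrative assumptions, not taken from the file.

#include <net/nfc/hci.h>

/* Hypothetical driver glue: only the nfc_hci_* calls come from the core above;
 * everything named example_*, the protocol mask, the LLC name and the numeric
 * values are placeholders. */
static struct nfc_hci_dev *example_hdev;

static int example_probe(void *phy_context, const struct nfc_hci_ops *ops,
			 struct nfc_hci_init_data *init_data)
{
	int r;

	/* ops->xmit must be non-NULL and at least one protocol requested,
	 * otherwise nfc_hci_allocate_device() returns NULL. */
	example_hdev = nfc_hci_allocate_device(ops, init_data, 0,
					       NFC_PROTO_ISO14443_MASK,
					       "shdlc", /* assumed LLC name */
					       4, 0, 29);
	if (!example_hdev)
		return -ENOMEM;

	nfc_hci_set_clientdata(example_hdev, phy_context);

	r = nfc_hci_register_device(example_hdev);
	if (r < 0)
		nfc_hci_free_device(example_hdev);

	return r;
}

static void example_remove(void)
{
	nfc_hci_unregister_device(example_hdev);
	nfc_hci_free_device(example_hdev);
}

/* RX path: the physical layer hands complete frames to the LLC/HCI core. */
static void example_frame_received(struct sk_buff *skb)
{
	nfc_hci_recv_frame(example_hdev, skb);
}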
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Link Layer Control manager
 *
 * Copyright (C) 2012 Intel Corporation. All rights reserved.
 */

#include <net/nfc/llc.h>

#include "llc.h"

static LIST_HEAD(llc_engines);

int __init nfc_llc_init(void)
{
	int r;

	r = nfc_llc_nop_register();
	if (r)
		goto exit;

	r = nfc_llc_shdlc_register();
	if (r)
		goto exit;

	return 0;

exit:
	nfc_llc_exit();
	return r;
}

void nfc_llc_exit(void)
{
	struct nfc_llc_engine *llc_engine, *n;

	list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
		list_del(&llc_engine->entry);
		kfree(llc_engine->name);
		kfree(llc_engine);
	}
}

int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
{
	struct nfc_llc_engine *llc_engine;

	llc_engine = kzalloc(sizeof(struct nfc_llc_engine), GFP_KERNEL);
	if (llc_engine == NULL)
		return -ENOMEM;

	llc_engine->name = kstrdup(name, GFP_KERNEL);
	if (llc_engine->name == NULL) {
		kfree(llc_engine);
		return -ENOMEM;
	}

	llc_engine->ops = ops;

	INIT_LIST_HEAD(&llc_engine->entry);
	list_add_tail(&llc_engine->entry, &llc_engines);

	return 0;
}

static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name)
{
	struct nfc_llc_engine *llc_engine;

	list_for_each_entry(llc_engine, &llc_engines, entry) {
		if (strcmp(llc_engine->name, name) == 0)
			return llc_engine;
	}

	return NULL;
}

void nfc_llc_unregister(const char *name)
{
	struct nfc_llc_engine *llc_engine;

	llc_engine = nfc_llc_name_to_engine(name);
	if (llc_engine == NULL)
		return;

	list_del(&llc_engine->entry);
	kfree(llc_engine->name);
	kfree(llc_engine);
}

struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
				 xmit_to_drv_t xmit_to_drv,
				 rcv_to_hci_t rcv_to_hci, int tx_headroom,
				 int tx_tailroom, llc_failure_t llc_failure)
{
	struct nfc_llc_engine *llc_engine;
	struct nfc_llc *llc;

	llc_engine = nfc_llc_name_to_engine(name);
	if (llc_engine == NULL)
		return NULL;

	llc = kzalloc(sizeof(struct nfc_llc), GFP_KERNEL);
	if (llc == NULL)
		return NULL;

	llc->data = llc_engine->ops->init(hdev, xmit_to_drv, rcv_to_hci,
					  tx_headroom, tx_tailroom,
					  &llc->rx_headroom, &llc->rx_tailroom,
					  llc_failure);
	if (llc->data == NULL) {
		kfree(llc);
		return NULL;
	}
	llc->ops = llc_engine->ops;

	return llc;
}

void nfc_llc_free(struct nfc_llc *llc)
{
	llc->ops->deinit(llc);
	kfree(llc);
}

int nfc_llc_start(struct nfc_llc *llc)
{
	return llc->ops->start(llc);
}
EXPORT_SYMBOL(nfc_llc_start);

int nfc_llc_stop(struct nfc_llc *llc)
{
	return llc->ops->stop(llc);
}
EXPORT_SYMBOL(nfc_llc_stop);

void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	llc->ops->rcv_from_drv(llc, skb);
}

int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
{
	return llc->ops->xmit_from_hci(llc, skb);
}

void *nfc_llc_get_data(struct nfc_llc *llc)
{
	return llc->data;
}
linux-master
net/nfc/hci/llc.c
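llc.c above dispatches everything through an nfc_llc_ops vtable looked up by name. The sketch below shows what registering a trivial engine could look like; the callback prototypes are inferred from the call sites in the file (ops->init/deinit/start/stop/rcv_from_drv/xmit_from_hci) and should be treated as assumptions, as should the example_* names.

/* Minimal, do-nothing LLC engine; prototypes inferred from llc.c call sites,
 * not copied from a header, so treat them as assumptions. */
static void *example_llc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit,
			      rcv_to_hci_t rcv, int tx_headroom,
			      int tx_tailroom, int *rx_headroom,
			      int *rx_tailroom, llc_failure_t llc_failure)
{
	*rx_headroom = 0;
	*rx_tailroom = 0;
	return kzalloc(1, GFP_KERNEL);	/* private state, freed in deinit */
}

static void example_llc_deinit(struct nfc_llc *llc)
{
	kfree(nfc_llc_get_data(llc));
}

static int example_llc_start(struct nfc_llc *llc)
{
	return 0;
}

static int example_llc_stop(struct nfc_llc *llc)
{
	return 0;
}

static void example_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	/* a real engine would deframe here and pass the result to rcv_to_hci */
	kfree_skb(skb);
}

static int example_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
{
	/* a real engine would frame here and hand the result to xmit_to_drv */
	kfree_skb(skb);
	return 0;
}

static const struct nfc_llc_ops example_llc_ops = {
	.init		= example_llc_init,
	.deinit		= example_llc_deinit,
	.start		= example_llc_start,
	.stop		= example_llc_stop,
	.rcv_from_drv	= example_llc_rcv_from_drv,
	.xmit_from_hci	= example_llc_xmit_from_hci,
};

/* Registration, e.g. from an __init path:
 *	nfc_llc_register("example", &example_llc_ops);
 */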
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <[email protected]>
 *
 * Acknowledgements:
 * This file is based on lib.c, which was written
 * by Maxim Krasnyansky.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>

/* NCI status codes to Unix errno mapping */
int nci_to_errno(__u8 code)
{
	switch (code) {
	case NCI_STATUS_OK:
		return 0;

	case NCI_STATUS_REJECTED:
		return -EBUSY;

	case NCI_STATUS_RF_FRAME_CORRUPTED:
		return -EBADMSG;

	case NCI_STATUS_NOT_INITIALIZED:
		return -EHOSTDOWN;

	case NCI_STATUS_SYNTAX_ERROR:
	case NCI_STATUS_SEMANTIC_ERROR:
	case NCI_STATUS_INVALID_PARAM:
	case NCI_STATUS_RF_PROTOCOL_ERROR:
	case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
		return -EPROTO;

	case NCI_STATUS_UNKNOWN_GID:
	case NCI_STATUS_UNKNOWN_OID:
		return -EBADRQC;

	case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
		return -EMSGSIZE;

	case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
		return -EALREADY;

	case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
	case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
		return -ECONNREFUSED;

	case NCI_STATUS_RF_TRANSMISSION_ERROR:
	case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
		return -ECOMM;

	case NCI_STATUS_RF_TIMEOUT_ERROR:
	case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
		return -ETIMEDOUT;

	case NCI_STATUS_FAILED:
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL(nci_to_errno);
linux-master
net/nfc/nci/lib.c
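A minimal illustration of the mapping above, as a caller might use it when a request completes (the chosen status value is arbitrary):

/* Illustrative only: the NCI core uses this helper to turn the status byte of
 * a completed request into the errno reported to callers. */
static int example_req_result_to_errno(__u8 nci_status)
{
	return nci_to_errno(nci_status); /* NCI_STATUS_RF_TIMEOUT_ERROR -> -ETIMEDOUT */
}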
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias <[email protected]> * * Acknowledgements: * This file is based on hci_event.c, which was written * by Maxim Krasnyansky. */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> /* Handle NCI Response packets */ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_core_reset_rsp *rsp = (void *)skb->data; pr_debug("status 0x%x\n", rsp->status); /* Handle NCI 1.x ver */ if (skb->len != 1) { if (rsp->status == NCI_STATUS_OK) { ndev->nci_ver = rsp->nci_ver; pr_debug("nci_ver 0x%x, config_status 0x%x\n", rsp->nci_ver, rsp->config_status); } nci_req_complete(ndev, rsp->status); } } static u8 nci_core_init_rsp_packet_v1(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_core_init_rsp_1 *rsp_1 = (void *)skb->data; const struct nci_core_init_rsp_2 *rsp_2; pr_debug("status 0x%x\n", rsp_1->status); if (rsp_1->status != NCI_STATUS_OK) return rsp_1->status; ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features); ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces; ndev->num_supported_rf_interfaces = min((int)ndev->num_supported_rf_interfaces, NCI_MAX_SUPPORTED_RF_INTERFACES); memcpy(ndev->supported_rf_interfaces, rsp_1->supported_rf_interfaces, ndev->num_supported_rf_interfaces); rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); ndev->max_logical_connections = rsp_2->max_logical_connections; ndev->max_routing_table_size = __le16_to_cpu(rsp_2->max_routing_table_size); ndev->max_ctrl_pkt_payload_len = rsp_2->max_ctrl_pkt_payload_len; ndev->max_size_for_large_params = __le16_to_cpu(rsp_2->max_size_for_large_params); ndev->manufact_id = rsp_2->manufact_id; ndev->manufact_specific_info = __le32_to_cpu(rsp_2->manufact_specific_info); return NCI_STATUS_OK; } static u8 nci_core_init_rsp_packet_v2(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_core_init_rsp_nci_ver2 *rsp = (void *)skb->data; const u8 *supported_rf_interface = rsp->supported_rf_interfaces; u8 rf_interface_idx = 0; u8 rf_extension_cnt = 0; pr_debug("status %x\n", rsp->status); if (rsp->status != NCI_STATUS_OK) return rsp->status; ndev->nfcc_features = __le32_to_cpu(rsp->nfcc_features); ndev->num_supported_rf_interfaces = rsp->num_supported_rf_interfaces; ndev->num_supported_rf_interfaces = min((int)ndev->num_supported_rf_interfaces, NCI_MAX_SUPPORTED_RF_INTERFACES); while (rf_interface_idx < ndev->num_supported_rf_interfaces) { ndev->supported_rf_interfaces[rf_interface_idx++] = *supported_rf_interface++; /* skip rf extension parameters */ rf_extension_cnt = *supported_rf_interface++; supported_rf_interface += rf_extension_cnt; } ndev->max_logical_connections = rsp->max_logical_connections; ndev->max_routing_table_size = __le16_to_cpu(rsp->max_routing_table_size); ndev->max_ctrl_pkt_payload_len = rsp->max_ctrl_pkt_payload_len; ndev->max_size_for_large_params = NCI_MAX_LARGE_PARAMS_NCI_v2; return NCI_STATUS_OK; } static void nci_core_init_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { u8 status = 0; if (!(ndev->nci_ver & NCI_VER_2_MASK)) status = nci_core_init_rsp_packet_v1(ndev, skb); 
else status = nci_core_init_rsp_packet_v2(ndev, skb); if (status != NCI_STATUS_OK) goto exit; pr_debug("nfcc_features 0x%x\n", ndev->nfcc_features); pr_debug("num_supported_rf_interfaces %d\n", ndev->num_supported_rf_interfaces); pr_debug("supported_rf_interfaces[0] 0x%x\n", ndev->supported_rf_interfaces[0]); pr_debug("supported_rf_interfaces[1] 0x%x\n", ndev->supported_rf_interfaces[1]); pr_debug("supported_rf_interfaces[2] 0x%x\n", ndev->supported_rf_interfaces[2]); pr_debug("supported_rf_interfaces[3] 0x%x\n", ndev->supported_rf_interfaces[3]); pr_debug("max_logical_connections %d\n", ndev->max_logical_connections); pr_debug("max_routing_table_size %d\n", ndev->max_routing_table_size); pr_debug("max_ctrl_pkt_payload_len %d\n", ndev->max_ctrl_pkt_payload_len); pr_debug("max_size_for_large_params %d\n", ndev->max_size_for_large_params); pr_debug("manufact_id 0x%x\n", ndev->manufact_id); pr_debug("manufact_specific_info 0x%x\n", ndev->manufact_specific_info); exit: nci_req_complete(ndev, status); } static void nci_core_set_config_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_core_set_config_rsp *rsp = (void *)skb->data; pr_debug("status 0x%x\n", rsp->status); nci_req_complete(ndev, rsp->status); } static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); nci_req_complete(ndev, status); } static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_conn_info *conn_info; __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); if (status == NCI_STATUS_OK) { atomic_set(&ndev->state, NCI_DISCOVERY); conn_info = ndev->rf_conn_info; if (!conn_info) { conn_info = devm_kzalloc(&ndev->nfc_dev->dev, sizeof(struct nci_conn_info), GFP_KERNEL); if (!conn_info) { status = NCI_STATUS_REJECTED; goto exit; } conn_info->conn_id = NCI_STATIC_RF_CONN_ID; INIT_LIST_HEAD(&conn_info->list); list_add(&conn_info->list, &ndev->conn_info_list); ndev->rf_conn_info = conn_info; } } exit: nci_req_complete(ndev, status); } static void nci_rf_disc_select_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); /* Complete the request on intf_activated_ntf or generic_error_ntf */ if (status != NCI_STATUS_OK) nci_req_complete(ndev, status); } static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); /* If target was active, complete the request only in deactivate_ntf */ if ((status != NCI_STATUS_OK) || (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) { nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); nci_req_complete(ndev, status); } } static void nci_nfcee_discover_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_nfcee_discover_rsp *discover_rsp; if (skb->len != 2) { nci_req_complete(ndev, NCI_STATUS_NFCEE_PROTOCOL_ERROR); return; } discover_rsp = (struct nci_nfcee_discover_rsp *)skb->data; if (discover_rsp->status != NCI_STATUS_OK || discover_rsp->num_nfcee == 0) nci_req_complete(ndev, discover_rsp->status); } static void nci_nfcee_mode_set_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); nci_req_complete(ndev, status); } static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; struct nci_conn_info *conn_info 
= NULL; const struct nci_core_conn_create_rsp *rsp; pr_debug("status 0x%x\n", status); if (status == NCI_STATUS_OK) { rsp = (struct nci_core_conn_create_rsp *)skb->data; conn_info = devm_kzalloc(&ndev->nfc_dev->dev, sizeof(*conn_info), GFP_KERNEL); if (!conn_info) { status = NCI_STATUS_REJECTED; goto exit; } conn_info->dest_params = devm_kzalloc(&ndev->nfc_dev->dev, sizeof(struct dest_spec_params), GFP_KERNEL); if (!conn_info->dest_params) { status = NCI_STATUS_REJECTED; goto free_conn_info; } conn_info->dest_type = ndev->cur_dest_type; conn_info->dest_params->id = ndev->cur_params.id; conn_info->dest_params->protocol = ndev->cur_params.protocol; conn_info->conn_id = rsp->conn_id; /* Note: data_exchange_cb and data_exchange_cb_context need to * be specify out of nci_core_conn_create_rsp_packet */ INIT_LIST_HEAD(&conn_info->list); list_add(&conn_info->list, &ndev->conn_info_list); if (ndev->cur_params.id == ndev->hci_dev->nfcee_id) ndev->hci_dev->conn_info = conn_info; conn_info->conn_id = rsp->conn_id; conn_info->max_pkt_payload_len = rsp->max_ctrl_pkt_payload_len; atomic_set(&conn_info->credits_cnt, rsp->credits_cnt); } free_conn_info: if (status == NCI_STATUS_REJECTED) devm_kfree(&ndev->nfc_dev->dev, conn_info); exit: nci_req_complete(ndev, status); } static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_conn_info *conn_info; __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); if (status == NCI_STATUS_OK) { conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id); if (conn_info) { list_del(&conn_info->list); if (conn_info == ndev->rf_conn_info) ndev->rf_conn_info = NULL; devm_kfree(&ndev->nfc_dev->dev, conn_info); } } nci_req_complete(ndev, status); } void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 rsp_opcode = nci_opcode(skb->data); /* we got a rsp, stop the cmd timer */ del_timer(&ndev->cmd_timer); pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", nci_pbf(skb->data), nci_opcode_gid(rsp_opcode), nci_opcode_oid(rsp_opcode), nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); if (nci_opcode_gid(rsp_opcode) == NCI_GID_PROPRIETARY) { if (nci_prop_rsp_packet(ndev, rsp_opcode, skb) == -ENOTSUPP) { pr_err("unsupported rsp opcode 0x%x\n", rsp_opcode); } goto end; } switch (rsp_opcode) { case NCI_OP_CORE_RESET_RSP: nci_core_reset_rsp_packet(ndev, skb); break; case NCI_OP_CORE_INIT_RSP: nci_core_init_rsp_packet(ndev, skb); break; case NCI_OP_CORE_SET_CONFIG_RSP: nci_core_set_config_rsp_packet(ndev, skb); break; case NCI_OP_CORE_CONN_CREATE_RSP: nci_core_conn_create_rsp_packet(ndev, skb); break; case NCI_OP_CORE_CONN_CLOSE_RSP: nci_core_conn_close_rsp_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_MAP_RSP: nci_rf_disc_map_rsp_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_RSP: nci_rf_disc_rsp_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_SELECT_RSP: nci_rf_disc_select_rsp_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_RSP: nci_rf_deactivate_rsp_packet(ndev, skb); break; case NCI_OP_NFCEE_DISCOVER_RSP: nci_nfcee_discover_rsp_packet(ndev, skb); break; case NCI_OP_NFCEE_MODE_SET_RSP: nci_nfcee_mode_set_rsp_packet(ndev, skb); break; default: pr_err("unknown rsp opcode 0x%x\n", rsp_opcode); break; } nci_core_rsp_packet(ndev, rsp_opcode, skb); end: kfree_skb(skb); /* trigger the next cmd */ atomic_set(&ndev->cmd_cnt, 1); if (!skb_queue_empty(&ndev->cmd_q)) queue_work(ndev->cmd_wq, &ndev->cmd_work); }
linux-master
net/nfc/nci/rsp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "nci_spi: %s: " fmt, __func__ #include <linux/module.h> #include <linux/export.h> #include <linux/spi/spi.h> #include <linux/crc-ccitt.h> #include <net/nfc/nci_core.h> #define NCI_SPI_ACK_SHIFT 6 #define NCI_SPI_MSB_PAYLOAD_MASK 0x3F #define NCI_SPI_SEND_TIMEOUT (NCI_CMD_TIMEOUT > NCI_DATA_TIMEOUT ? \ NCI_CMD_TIMEOUT : NCI_DATA_TIMEOUT) #define NCI_SPI_DIRECT_WRITE 0x01 #define NCI_SPI_DIRECT_READ 0x02 #define ACKNOWLEDGE_NONE 0 #define ACKNOWLEDGE_ACK 1 #define ACKNOWLEDGE_NACK 2 #define CRC_INIT 0xFFFF static int __nci_spi_send(struct nci_spi *nspi, const struct sk_buff *skb, int cs_change) { struct spi_message m; struct spi_transfer t; memset(&t, 0, sizeof(struct spi_transfer)); /* a NULL skb means we just want the SPI chip select line to raise */ if (skb) { t.tx_buf = skb->data; t.len = skb->len; } else { /* still set tx_buf non NULL to make the driver happy */ t.tx_buf = &t; t.len = 0; } t.cs_change = cs_change; t.delay.value = nspi->xfer_udelay; t.delay.unit = SPI_DELAY_UNIT_USECS; t.speed_hz = nspi->xfer_speed_hz; spi_message_init(&m); spi_message_add_tail(&t, &m); return spi_sync(nspi->spi, &m); } int nci_spi_send(struct nci_spi *nspi, struct completion *write_handshake_completion, struct sk_buff *skb) { unsigned int payload_len = skb->len; unsigned char *hdr; int ret; long completion_rc; /* add the NCI SPI header to the start of the buffer */ hdr = skb_push(skb, NCI_SPI_HDR_LEN); hdr[0] = NCI_SPI_DIRECT_WRITE; hdr[1] = nspi->acknowledge_mode; hdr[2] = payload_len >> 8; hdr[3] = payload_len & 0xFF; if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) { u16 crc; crc = crc_ccitt(CRC_INIT, skb->data, skb->len); skb_put_u8(skb, crc >> 8); skb_put_u8(skb, crc & 0xFF); } if (write_handshake_completion) { /* Trick SPI driver to raise chip select */ ret = __nci_spi_send(nspi, NULL, 1); if (ret) goto done; /* wait for NFC chip hardware handshake to complete */ if (wait_for_completion_timeout(write_handshake_completion, msecs_to_jiffies(1000)) == 0) { ret = -ETIME; goto done; } } ret = __nci_spi_send(nspi, skb, 0); if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED) goto done; reinit_completion(&nspi->req_completion); completion_rc = wait_for_completion_interruptible_timeout( &nspi->req_completion, NCI_SPI_SEND_TIMEOUT); if (completion_rc <= 0 || nspi->req_result == ACKNOWLEDGE_NACK) ret = -EIO; done: kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(nci_spi_send); /* ---- Interface to NCI SPI drivers ---- */ /** * nci_spi_allocate_spi - allocate a new nci spi * * @spi: SPI device * @acknowledge_mode: Acknowledge mode used by the NFC device * @delay: delay between transactions in us * @ndev: nci dev to send incoming nci frames to */ struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi, u8 acknowledge_mode, unsigned int delay, struct nci_dev *ndev) { struct nci_spi *nspi; nspi = devm_kzalloc(&spi->dev, sizeof(struct nci_spi), GFP_KERNEL); if (!nspi) return NULL; nspi->acknowledge_mode = acknowledge_mode; nspi->xfer_udelay = delay; /* Use controller max SPI speed by default */ nspi->xfer_speed_hz = 0; nspi->spi = spi; nspi->ndev = ndev; init_completion(&nspi->req_completion); return nspi; } EXPORT_SYMBOL_GPL(nci_spi_allocate_spi); static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge) { struct sk_buff *skb; unsigned char *hdr; u16 crc; int ret; skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL); /* add the NCI SPI header to the start of the 
buffer */ hdr = skb_push(skb, NCI_SPI_HDR_LEN); hdr[0] = NCI_SPI_DIRECT_WRITE; hdr[1] = NCI_SPI_CRC_ENABLED; hdr[2] = acknowledge << NCI_SPI_ACK_SHIFT; hdr[3] = 0; crc = crc_ccitt(CRC_INIT, skb->data, skb->len); skb_put_u8(skb, crc >> 8); skb_put_u8(skb, crc & 0xFF); ret = __nci_spi_send(nspi, skb, 0); kfree_skb(skb); return ret; } static struct sk_buff *__nci_spi_read(struct nci_spi *nspi) { struct sk_buff *skb; struct spi_message m; unsigned char req[2], resp_hdr[2]; struct spi_transfer tx, rx; unsigned short rx_len = 0; int ret; spi_message_init(&m); memset(&tx, 0, sizeof(struct spi_transfer)); req[0] = NCI_SPI_DIRECT_READ; req[1] = nspi->acknowledge_mode; tx.tx_buf = req; tx.len = 2; tx.cs_change = 0; tx.speed_hz = nspi->xfer_speed_hz; spi_message_add_tail(&tx, &m); memset(&rx, 0, sizeof(struct spi_transfer)); rx.rx_buf = resp_hdr; rx.len = 2; rx.cs_change = 1; rx.speed_hz = nspi->xfer_speed_hz; spi_message_add_tail(&rx, &m); ret = spi_sync(nspi->spi, &m); if (ret) return NULL; if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) + resp_hdr[1] + NCI_SPI_CRC_LEN; else rx_len = (resp_hdr[0] << 8) | resp_hdr[1]; skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL); if (!skb) return NULL; spi_message_init(&m); memset(&rx, 0, sizeof(struct spi_transfer)); rx.rx_buf = skb_put(skb, rx_len); rx.len = rx_len; rx.cs_change = 0; rx.delay.value = nspi->xfer_udelay; rx.delay.unit = SPI_DELAY_UNIT_USECS; rx.speed_hz = nspi->xfer_speed_hz; spi_message_add_tail(&rx, &m); ret = spi_sync(nspi->spi, &m); if (ret) goto receive_error; if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) { *(u8 *)skb_push(skb, 1) = resp_hdr[1]; *(u8 *)skb_push(skb, 1) = resp_hdr[0]; } return skb; receive_error: kfree_skb(skb); return NULL; } static int nci_spi_check_crc(struct sk_buff *skb) { u16 crc_data = (skb->data[skb->len - 2] << 8) | skb->data[skb->len - 1]; int ret; ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN) == crc_data); skb_trim(skb, skb->len - NCI_SPI_CRC_LEN); return ret; } static u8 nci_spi_get_ack(struct sk_buff *skb) { u8 ret; ret = skb->data[0] >> NCI_SPI_ACK_SHIFT; /* Remove NFCC part of the header: ACK, NACK and MSB payload len */ skb_pull(skb, 2); return ret; } /** * nci_spi_read - read frame from NCI SPI drivers * * @nspi: The nci spi * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. * * It returns an allocated skb containing the frame on success, or NULL. */ struct sk_buff *nci_spi_read(struct nci_spi *nspi) { struct sk_buff *skb; /* Retrieve frame from SPI */ skb = __nci_spi_read(nspi); if (!skb) goto done; if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) { if (!nci_spi_check_crc(skb)) { send_acknowledge(nspi, ACKNOWLEDGE_NACK); goto done; } /* In case of acknowledged mode: if ACK or NACK received, * unblock completion of latest frame sent. */ nspi->req_result = nci_spi_get_ack(skb); if (nspi->req_result) complete(&nspi->req_completion); } /* If there is no payload (ACK/NACK only frame), * free the socket buffer */ if (!skb->len) { kfree_skb(skb); skb = NULL; goto done; } if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) send_acknowledge(nspi, ACKNOWLEDGE_ACK); done: return skb; } EXPORT_SYMBOL_GPL(nci_spi_read); MODULE_LICENSE("GPL");
linux-master
net/nfc/nci/spi.c
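A driver built on the helpers above typically allocates a struct nci_spi at probe time and drains the chip from a threaded interrupt handler. This is a hedged sketch: the example_* names and the 50 us delay are invented, and nci_recv_frame() (the usual hand-off into the NCI core) is assumed here since it is not part of this excerpt.

#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

/* Hypothetical read path: nci_spi_allocate_spi()/nci_spi_read() come from
 * spi.c above; nci_recv_frame() is assumed, not shown in this excerpt. */
static irqreturn_t example_spi_irq_thread(int irq, void *context)
{
	struct nci_spi *nspi = context;
	struct sk_buff *skb;

	/* may sleep; returns NULL on error or for ACK/NACK-only frames */
	skb = nci_spi_read(nspi);
	if (skb)
		nci_recv_frame(nspi->ndev, skb);

	return IRQ_HANDLED;
}

static struct nci_spi *example_spi_probe(struct spi_device *spi,
					 struct nci_dev *ndev)
{
	/* CRC-acknowledged mode, 50 us inter-transfer delay (placeholder) */
	return nci_spi_allocate_spi(spi, NCI_SPI_CRC_ENABLED, 50, ndev);
}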
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2014 Marvell International Ltd. * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias <[email protected]> * * Acknowledgements: * This file is based on hci_event.c, which was written * by Maxim Krasnyansky. */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> /* Handle NCI Notification packets */ static void nci_core_reset_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { /* Handle NCI 2.x core reset notification */ const struct nci_core_reset_ntf *ntf = (void *)skb->data; ndev->nci_ver = ntf->nci_ver; pr_debug("nci_ver 0x%x, config_status 0x%x\n", ntf->nci_ver, ntf->config_status); ndev->manufact_id = ntf->manufact_id; ndev->manufact_specific_info = __le32_to_cpu(ntf->manufact_specific_info); nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; struct nci_conn_info *conn_info; int i; pr_debug("num_entries %d\n", ntf->num_entries); if (ntf->num_entries > NCI_MAX_NUM_CONN) ntf->num_entries = NCI_MAX_NUM_CONN; /* update the credits */ for (i = 0; i < ntf->num_entries; i++) { ntf->conn_entries[i].conn_id = nci_conn_id(&ntf->conn_entries[i].conn_id); pr_debug("entry[%d]: conn_id %d, credits %d\n", i, ntf->conn_entries[i].conn_id, ntf->conn_entries[i].credits); conn_info = nci_get_conn_info_by_conn_id(ndev, ntf->conn_entries[i].conn_id); if (!conn_info) return; atomic_add(ntf->conn_entries[i].credits, &conn_info->credits_cnt); } /* trigger the next tx */ if (!skb_queue_empty(&ndev->tx_q)) queue_work(ndev->tx_wq, &ndev->tx_work); } static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { /* Activation failed, so complete the request (the state remains the same) */ nci_req_complete(ndev, status); } } static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_intf_error_ntf *ntf = (void *) skb->data; ntf->conn_id = nci_conn_id(&ntf->conn_id); pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id); /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO); } static const __u8 * nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfca_poll *nfca_poll, const __u8 *data) { nfca_poll->sens_res = __le16_to_cpu(*((__le16 *)data)); data += 2; nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE); pr_debug("sens_res 0x%x, nfcid1_len %d\n", nfca_poll->sens_res, nfca_poll->nfcid1_len); memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len); data += nfca_poll->nfcid1_len; nfca_poll->sel_res_len = *data++; if (nfca_poll->sel_res_len != 0) nfca_poll->sel_res = *data++; pr_debug("sel_res_len %d, sel_res 0x%x\n", nfca_poll->sel_res_len, nfca_poll->sel_res); return data; } static const __u8 * nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcb_poll 
*nfcb_poll, const __u8 *data) { nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE); pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len); memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len); data += nfcb_poll->sensb_res_len; return data; } static const __u8 * nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcf_poll *nfcf_poll, const __u8 *data) { nfcf_poll->bit_rate = *data++; nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE); pr_debug("bit_rate %d, sensf_res_len %d\n", nfcf_poll->bit_rate, nfcf_poll->sensf_res_len); memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len); data += nfcf_poll->sensf_res_len; return data; } static const __u8 * nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcv_poll *nfcv_poll, const __u8 *data) { ++data; nfcv_poll->dsfid = *data++; memcpy(nfcv_poll->uid, data, NFC_ISO15693_UID_MAXSIZE); data += NFC_ISO15693_UID_MAXSIZE; return data; } static const __u8 * nci_extract_rf_params_nfcf_passive_listen(struct nci_dev *ndev, struct rf_tech_specific_params_nfcf_listen *nfcf_listen, const __u8 *data) { nfcf_listen->local_nfcid2_len = min_t(__u8, *data++, NFC_NFCID2_MAXSIZE); memcpy(nfcf_listen->local_nfcid2, data, nfcf_listen->local_nfcid2_len); data += nfcf_listen->local_nfcid2_len; return data; } static __u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol) { if (ndev->ops->get_rfprotocol) return ndev->ops->get_rfprotocol(ndev, rf_protocol); return 0; } static int nci_add_new_protocol(struct nci_dev *ndev, struct nfc_target *target, __u8 rf_protocol, __u8 rf_tech_and_mode, const void *params) { const struct rf_tech_specific_params_nfca_poll *nfca_poll; const struct rf_tech_specific_params_nfcb_poll *nfcb_poll; const struct rf_tech_specific_params_nfcf_poll *nfcf_poll; const struct rf_tech_specific_params_nfcv_poll *nfcv_poll; __u32 protocol; if (rf_protocol == NCI_RF_PROTOCOL_T1T) protocol = NFC_PROTO_JEWEL_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T2T) protocol = NFC_PROTO_MIFARE_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) protocol = NFC_PROTO_ISO14443_MASK; else protocol = NFC_PROTO_ISO14443_B_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T3T) protocol = NFC_PROTO_FELICA_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) protocol = NFC_PROTO_NFC_DEP_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T5T) protocol = NFC_PROTO_ISO15693_MASK; else protocol = nci_get_prop_rf_protocol(ndev, rf_protocol); if (!(protocol & ndev->poll_prots)) { pr_err("the target found does not have the desired protocol\n"); return -EPROTO; } if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) { nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params; target->sens_res = nfca_poll->sens_res; target->sel_res = nfca_poll->sel_res; target->nfcid1_len = nfca_poll->nfcid1_len; if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1)) return -EPROTO; if (target->nfcid1_len > 0) { memcpy(target->nfcid1, nfca_poll->nfcid1, target->nfcid1_len); } } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) { nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params; target->sensb_res_len = nfcb_poll->sensb_res_len; if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res)) return -EPROTO; if (target->sensb_res_len > 0) { memcpy(target->sensb_res, nfcb_poll->sensb_res, target->sensb_res_len); } } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) { 
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params; target->sensf_res_len = nfcf_poll->sensf_res_len; if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res)) return -EPROTO; if (target->sensf_res_len > 0) { memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); } } else if (rf_tech_and_mode == NCI_NFC_V_PASSIVE_POLL_MODE) { nfcv_poll = (struct rf_tech_specific_params_nfcv_poll *)params; target->is_iso15693 = 1; target->iso15693_dsfid = nfcv_poll->dsfid; memcpy(target->iso15693_uid, nfcv_poll->uid, NFC_ISO15693_UID_MAXSIZE); } else { pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode); return -EPROTO; } target->supported_protocols |= protocol; pr_debug("protocol 0x%x\n", protocol); return 0; } static void nci_add_new_target(struct nci_dev *ndev, const struct nci_rf_discover_ntf *ntf) { struct nfc_target *target; int i, rc; for (i = 0; i < ndev->n_targets; i++) { target = &ndev->targets[i]; if (target->logical_idx == ntf->rf_discovery_id) { /* This target already exists, add the new protocol */ nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); return; } } /* This is a new target, check if we've enough room */ if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) { pr_debug("not enough room, ignoring new target...\n"); return; } target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); if (!rc) { target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); } } void nci_clear_target_list(struct nci_dev *ndev) { memset(ndev->targets, 0, (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS)); ndev->n_targets = 0; } static void nci_rf_discover_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_rf_discover_ntf ntf; const __u8 *data = skb->data; bool add_target = true; ntf.rf_discovery_id = *data++; ntf.rf_protocol = *data++; ntf.rf_tech_and_mode = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; case NCI_NFC_V_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcv_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcv_poll), data); break; default: pr_err("unsupported rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); data += ntf.rf_tech_specific_params_len; add_target = false; } } ntf.ntf_type = *data++; pr_debug("ntf_type %d\n", ntf.ntf_type); if (add_target == true) nci_add_new_target(ndev, &ntf); if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) { atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES); } else { atomic_set(&ndev->state, NCI_W4_HOST_SELECT); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } } static int 
nci_extract_activation_params_iso_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, const __u8 *data) { struct activation_params_nfca_poll_iso_dep *nfca_poll; struct activation_params_nfcb_poll_iso_dep *nfcb_poll; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; nfca_poll->rats_res_len = min_t(__u8, *data++, 20); pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len); if (nfca_poll->rats_res_len > 0) { memcpy(nfca_poll->rats_res, data, nfca_poll->rats_res_len); } break; case NCI_NFC_B_PASSIVE_POLL_MODE: nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep; nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50); pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len); if (nfcb_poll->attrib_res_len > 0) { memcpy(nfcb_poll->attrib_res, data, nfcb_poll->attrib_res_len); } break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, const __u8 *data) { struct activation_params_poll_nfc_dep *poll; struct activation_params_listen_nfc_dep *listen; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: case NCI_NFC_F_PASSIVE_POLL_MODE: poll = &ntf->activation_params.poll_nfc_dep; poll->atr_res_len = min_t(__u8, *data++, NFC_ATR_RES_MAXSIZE - 2); pr_debug("atr_res_len %d\n", poll->atr_res_len); if (poll->atr_res_len > 0) memcpy(poll->atr_res, data, poll->atr_res_len); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: case NCI_NFC_F_PASSIVE_LISTEN_MODE: listen = &ntf->activation_params.listen_nfc_dep; listen->atr_req_len = min_t(__u8, *data++, NFC_ATR_REQ_MAXSIZE - 2); pr_debug("atr_req_len %d\n", listen->atr_req_len); if (listen->atr_req_len > 0) memcpy(listen->atr_req, data, listen->atr_req_len); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static void nci_target_auto_activated(struct nci_dev *ndev, const struct nci_rf_intf_activated_ntf *ntf) { struct nfc_target *target; int rc; target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->activation_rf_tech_and_mode, &ntf->rf_tech_specific_params); if (rc) return; target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev, const struct nci_rf_intf_activated_ntf *ntf) { ndev->remote_gb_len = 0; if (ntf->activation_params_len <= 0) return NCI_STATUS_OK; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: case NCI_NFC_F_PASSIVE_POLL_MODE: ndev->remote_gb_len = min_t(__u8, (ntf->activation_params.poll_nfc_dep.atr_res_len - NFC_ATR_RES_GT_OFFSET), NFC_ATR_RES_GB_MAXSIZE); memcpy(ndev->remote_gb, (ntf->activation_params.poll_nfc_dep.atr_res + NFC_ATR_RES_GT_OFFSET), ndev->remote_gb_len); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: case NCI_NFC_F_PASSIVE_LISTEN_MODE: ndev->remote_gb_len = min_t(__u8, (ntf->activation_params.listen_nfc_dep.atr_req_len - NFC_ATR_REQ_GT_OFFSET), NFC_ATR_REQ_GB_MAXSIZE); memcpy(ndev->remote_gb, (ntf->activation_params.listen_nfc_dep.atr_req + NFC_ATR_REQ_GT_OFFSET), ndev->remote_gb_len); 
break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_conn_info *conn_info; struct nci_rf_intf_activated_ntf ntf; const __u8 *data = skb->data; int err = NCI_STATUS_OK; ntf.rf_discovery_id = *data++; ntf.rf_interface = *data++; ntf.rf_protocol = *data++; ntf.activation_rf_tech_and_mode = *data++; ntf.max_data_pkt_payload_size = *data++; ntf.initial_num_credits = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_interface 0x%x\n", ntf.rf_interface); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); pr_debug("max_data_pkt_payload_size 0x%x\n", ntf.max_data_pkt_payload_size); pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); /* If this contains a value of 0x00 (NFCEE Direct RF * Interface) then all following parameters SHALL contain a * value of 0 and SHALL be ignored. */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFCEE_DIRECT) goto listen; if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; case NCI_NFC_V_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcv_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcv_poll), data); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: /* no RF technology specific parameters */ break; case NCI_NFC_F_PASSIVE_LISTEN_MODE: data = nci_extract_rf_params_nfcf_passive_listen(ndev, &(ntf.rf_tech_specific_params.nfcf_listen), data); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); err = NCI_STATUS_RF_PROTOCOL_ERROR; goto exit; } } ntf.data_exch_rf_tech_and_mode = *data++; ntf.data_exch_tx_bit_rate = *data++; ntf.data_exch_rx_bit_rate = *data++; ntf.activation_params_len = *data++; pr_debug("data_exch_rf_tech_and_mode 0x%x\n", ntf.data_exch_rf_tech_and_mode); pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate); pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate); pr_debug("activation_params_len %d\n", ntf.activation_params_len); if (ntf.activation_params_len > 0) { switch (ntf.rf_interface) { case NCI_RF_INTERFACE_ISO_DEP: err = nci_extract_activation_params_iso_dep(ndev, &ntf, data); break; case NCI_RF_INTERFACE_NFC_DEP: err = nci_extract_activation_params_nfc_dep(ndev, &ntf, data); break; case NCI_RF_INTERFACE_FRAME: /* no activation params */ break; default: pr_err("unsupported rf_interface 0x%x\n", ntf.rf_interface); err = NCI_STATUS_RF_PROTOCOL_ERROR; break; } } exit: if (err == NCI_STATUS_OK) { conn_info = ndev->rf_conn_info; if (!conn_info) return; conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size; conn_info->initial_num_credits = ntf.initial_num_credits; /* set the available credits to initial value */ atomic_set(&conn_info->credits_cnt, 
conn_info->initial_num_credits); /* store general bytes to be reported later in dep_link_up */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) { err = nci_store_general_bytes_nfc_dep(ndev, &ntf); if (err != NCI_STATUS_OK) pr_err("unable to store general bytes\n"); } } if (!(ntf.activation_rf_tech_and_mode & NCI_RF_TECH_MODE_LISTEN_MASK)) { /* Poll mode */ if (atomic_read(&ndev->state) == NCI_DISCOVERY) { /* A single target was found and activated * automatically */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); if (err == NCI_STATUS_OK) nci_target_auto_activated(ndev, &ntf); } else { /* ndev->state == NCI_W4_HOST_SELECT */ /* A selected target was activated, so complete the * request */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); nci_req_complete(ndev, err); } } else { listen: /* Listen mode */ atomic_set(&ndev->state, NCI_LISTEN_ACTIVE); if (err == NCI_STATUS_OK && ntf.rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) { err = nfc_tm_activated(ndev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, ndev->remote_gb, ndev->remote_gb_len); if (err != NCI_STATUS_OK) pr_err("error when signaling tm activation\n"); } } } static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_conn_info *conn_info; const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data; pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); conn_info = ndev->rf_conn_info; if (!conn_info) return; /* drop tx data queue */ skb_queue_purge(&ndev->tx_q); /* drop partial rx data packet */ if (ndev->rx_data_reassembly) { kfree_skb(ndev->rx_data_reassembly); ndev->rx_data_reassembly = NULL; } /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, NCI_STATIC_RF_CONN_ID, -EIO); switch (ntf->type) { case NCI_DEACTIVATE_TYPE_IDLE_MODE: nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); break; case NCI_DEACTIVATE_TYPE_SLEEP_MODE: case NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE: atomic_set(&ndev->state, NCI_W4_HOST_SELECT); break; case NCI_DEACTIVATE_TYPE_DISCOVERY: nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_DISCOVERY); break; } nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { u8 status = NCI_STATUS_OK; const struct nci_nfcee_discover_ntf *nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data; /* NFCForum NCI 9.2.1 HCI Network Specific Handling * If the NFCC supports the HCI Network, it SHALL return one, * and only one, NFCEE_DISCOVER_NTF with a Protocol type of * “HCI Access”, even if the HCI Network contains multiple NFCEEs. 
*/ ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id; ndev->cur_params.id = nfcee_ntf->nfcee_id; nci_req_complete(ndev, status); } void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", nci_pbf(skb->data), nci_opcode_gid(ntf_opcode), nci_opcode_oid(ntf_opcode), nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); if (nci_opcode_gid(ntf_opcode) == NCI_GID_PROPRIETARY) { if (nci_prop_ntf_packet(ndev, ntf_opcode, skb) == -ENOTSUPP) { pr_err("unsupported ntf opcode 0x%x\n", ntf_opcode); } goto end; } switch (ntf_opcode) { case NCI_OP_CORE_RESET_NTF: nci_core_reset_ntf_packet(ndev, skb); break; case NCI_OP_CORE_CONN_CREDITS_NTF: nci_core_conn_credits_ntf_packet(ndev, skb); break; case NCI_OP_CORE_GENERIC_ERROR_NTF: nci_core_generic_error_ntf_packet(ndev, skb); break; case NCI_OP_CORE_INTF_ERROR_NTF: nci_core_conn_intf_error_ntf_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_NTF: nci_rf_discover_ntf_packet(ndev, skb); break; case NCI_OP_RF_INTF_ACTIVATED_NTF: nci_rf_intf_activated_ntf_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_NTF: nci_rf_deactivate_ntf_packet(ndev, skb); break; case NCI_OP_NFCEE_DISCOVER_NTF: nci_nfcee_discover_ntf_packet(ndev, skb); break; case NCI_OP_RF_NFCEE_ACTION_NTF: break; default: pr_err("unknown ntf opcode 0x%x\n", ntf_opcode); break; } nci_core_ntf_packet(ndev, ntf_opcode, skb); end: kfree_skb(skb); }
linux-master
net/nfc/nci/ntf.c
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2014 Marvell International Ltd. * * Written by Ilan Elias <[email protected]> * * Acknowledgements: * This file is based on hci_core.c, which was written * by Maxim Krasnyansky. */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include <linux/kcov.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> struct core_conn_create_data { int length; struct nci_core_conn_create_cmd *cmd; }; static void nci_cmd_work(struct work_struct *work); static void nci_rx_work(struct work_struct *work); static void nci_tx_work(struct work_struct *work); struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, int conn_id) { struct nci_conn_info *conn_info; list_for_each_entry(conn_info, &ndev->conn_info_list, list) { if (conn_info->conn_id == conn_id) return conn_info; } return NULL; } int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type, const struct dest_spec_params *params) { const struct nci_conn_info *conn_info; list_for_each_entry(conn_info, &ndev->conn_info_list, list) { if (conn_info->dest_type == dest_type) { if (!params) return conn_info->conn_id; if (params->id == conn_info->dest_params->id && params->protocol == conn_info->dest_params->protocol) return conn_info->conn_id; } } return -EINVAL; } EXPORT_SYMBOL(nci_get_conn_info_by_dest_type_params); /* ---- NCI requests ---- */ void nci_req_complete(struct nci_dev *ndev, int result) { if (ndev->req_status == NCI_REQ_PEND) { ndev->req_result = result; ndev->req_status = NCI_REQ_DONE; complete(&ndev->req_completion); } } EXPORT_SYMBOL(nci_req_complete); static void nci_req_cancel(struct nci_dev *ndev, int err) { if (ndev->req_status == NCI_REQ_PEND) { ndev->req_result = err; ndev->req_status = NCI_REQ_CANCELED; complete(&ndev->req_completion); } } /* Execute request and wait for completion. */ static int __nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, const void *opt), const void *opt, __u32 timeout) { int rc = 0; long completion_rc; ndev->req_status = NCI_REQ_PEND; reinit_completion(&ndev->req_completion); req(ndev, opt); completion_rc = wait_for_completion_interruptible_timeout(&ndev->req_completion, timeout); pr_debug("wait_for_completion return %ld\n", completion_rc); if (completion_rc > 0) { switch (ndev->req_status) { case NCI_REQ_DONE: rc = nci_to_errno(ndev->req_result); break; case NCI_REQ_CANCELED: rc = -ndev->req_result; break; default: rc = -ETIMEDOUT; break; } } else { pr_err("wait_for_completion_interruptible_timeout failed %ld\n", completion_rc); rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc)); } ndev->req_status = ndev->req_result = 0; return rc; } inline int nci_request(struct nci_dev *ndev, void (*req)(struct nci_dev *ndev, const void *opt), const void *opt, __u32 timeout) { int rc; /* Serialize all requests */ mutex_lock(&ndev->req_lock); /* check the state after obtaing the lock against any races * from nci_close_device when the device gets removed. 
*/ if (test_bit(NCI_UP, &ndev->flags)) rc = __nci_request(ndev, req, opt, timeout); else rc = -ENETDOWN; mutex_unlock(&ndev->req_lock); return rc; } static void nci_reset_req(struct nci_dev *ndev, const void *opt) { struct nci_core_reset_cmd cmd; cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG; nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd); } static void nci_init_req(struct nci_dev *ndev, const void *opt) { u8 plen = 0; if (opt) plen = sizeof(struct nci_core_init_v2_cmd); nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, plen, opt); } static void nci_init_complete_req(struct nci_dev *ndev, const void *opt) { struct nci_rf_disc_map_cmd cmd; struct disc_map_config *cfg = cmd.mapping_configs; __u8 *num = &cmd.num_mapping_configs; int i; /* set rf mapping configurations */ *num = 0; /* by default mapping is set to NCI_RF_INTERFACE_FRAME */ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) { if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_ISO_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | NCI_DISC_MAP_MODE_LISTEN; cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP; (*num)++; } else if (ndev->supported_rf_interfaces[i] == NCI_RF_INTERFACE_NFC_DEP) { cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; cfg[*num].mode = NCI_DISC_MAP_MODE_POLL | NCI_DISC_MAP_MODE_LISTEN; cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP; (*num)++; } if (*num == NCI_MAX_NUM_MAPPING_CONFIGS) break; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD, (1 + ((*num) * sizeof(struct disc_map_config))), &cmd); } struct nci_set_config_param { __u8 id; size_t len; const __u8 *val; }; static void nci_set_config_req(struct nci_dev *ndev, const void *opt) { const struct nci_set_config_param *param = opt; struct nci_core_set_config_cmd cmd; BUG_ON(param->len > NCI_MAX_PARAM_LEN); cmd.num_params = 1; cmd.param.id = param->id; cmd.param.len = param->len; memcpy(cmd.param.val, param->val, param->len); nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd); } struct nci_rf_discover_param { __u32 im_protocols; __u32 tm_protocols; }; static void nci_rf_discover_req(struct nci_dev *ndev, const void *opt) { const struct nci_rf_discover_param *param = opt; struct nci_rf_disc_cmd cmd; cmd.num_disc_configs = 0; if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (param->im_protocols & NFC_PROTO_JEWEL_MASK || param->im_protocols & NFC_PROTO_MIFARE_MASK || param->im_protocols & NFC_PROTO_ISO14443_MASK || param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_A_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (param->im_protocols & NFC_PROTO_ISO14443_B_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_B_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (param->im_protocols & NFC_PROTO_FELICA_MASK || param->im_protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_F_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) && (param->im_protocols & NFC_PROTO_ISO15693_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_V_PASSIVE_POLL_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } if 
((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS - 1) && (param->tm_protocols & NFC_PROTO_NFC_DEP_MASK)) { cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_A_PASSIVE_LISTEN_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode = NCI_NFC_F_PASSIVE_LISTEN_MODE; cmd.disc_configs[cmd.num_disc_configs].frequency = 1; cmd.num_disc_configs++; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD, (1 + (cmd.num_disc_configs * sizeof(struct disc_config))), &cmd); } struct nci_rf_discover_select_param { __u8 rf_discovery_id; __u8 rf_protocol; }; static void nci_rf_discover_select_req(struct nci_dev *ndev, const void *opt) { const struct nci_rf_discover_select_param *param = opt; struct nci_rf_discover_select_cmd cmd; cmd.rf_discovery_id = param->rf_discovery_id; cmd.rf_protocol = param->rf_protocol; switch (cmd.rf_protocol) { case NCI_RF_PROTOCOL_ISO_DEP: cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP; break; case NCI_RF_PROTOCOL_NFC_DEP: cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP; break; default: cmd.rf_interface = NCI_RF_INTERFACE_FRAME; break; } nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD, sizeof(struct nci_rf_discover_select_cmd), &cmd); } static void nci_rf_deactivate_req(struct nci_dev *ndev, const void *opt) { struct nci_rf_deactivate_cmd cmd; cmd.type = (unsigned long)opt; nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD, sizeof(struct nci_rf_deactivate_cmd), &cmd); } struct nci_cmd_param { __u16 opcode; size_t len; const __u8 *payload; }; static void nci_generic_req(struct nci_dev *ndev, const void *opt) { const struct nci_cmd_param *param = opt; nci_send_cmd(ndev, param->opcode, param->len, param->payload); } int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, const __u8 *payload) { struct nci_cmd_param param; param.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, oid); param.len = len; param.payload = payload; return __nci_request(ndev, nci_generic_req, &param, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } EXPORT_SYMBOL(nci_prop_cmd); int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, const __u8 *payload) { struct nci_cmd_param param; param.opcode = opcode; param.len = len; param.payload = payload; return __nci_request(ndev, nci_generic_req, &param, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } EXPORT_SYMBOL(nci_core_cmd); int nci_core_reset(struct nci_dev *ndev) { return __nci_request(ndev, nci_reset_req, (void *)0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); } EXPORT_SYMBOL(nci_core_reset); int nci_core_init(struct nci_dev *ndev) { return __nci_request(ndev, nci_init_req, (void *)0, msecs_to_jiffies(NCI_INIT_TIMEOUT)); } EXPORT_SYMBOL(nci_core_init); struct nci_loopback_data { u8 conn_id; struct sk_buff *data; }; static void nci_send_data_req(struct nci_dev *ndev, const void *opt) { const struct nci_loopback_data *data = opt; nci_send_data(ndev, data->conn_id, data->data); } static void nci_nfcc_loopback_cb(void *context, struct sk_buff *skb, int err) { struct nci_dev *ndev = (struct nci_dev *)context; struct nci_conn_info *conn_info; conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id); if (!conn_info) { nci_req_complete(ndev, NCI_STATUS_REJECTED); return; } conn_info->rx_skb = skb; nci_req_complete(ndev, NCI_STATUS_OK); } int nci_nfcc_loopback(struct nci_dev *ndev, const void *data, size_t data_len, struct sk_buff **resp) { int r; struct nci_loopback_data loopback_data; struct nci_conn_info *conn_info; struct sk_buff *skb; int conn_id = nci_get_conn_info_by_dest_type_params(ndev, 
NCI_DESTINATION_NFCC_LOOPBACK, NULL); if (conn_id < 0) { r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCC_LOOPBACK, 0, 0, NULL); if (r != NCI_STATUS_OK) return r; conn_id = nci_get_conn_info_by_dest_type_params(ndev, NCI_DESTINATION_NFCC_LOOPBACK, NULL); } conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); if (!conn_info) return -EPROTO; /* store cb and context to be used on receiving data */ conn_info->data_exchange_cb = nci_nfcc_loopback_cb; conn_info->data_exchange_cb_context = ndev; skb = nci_skb_alloc(ndev, NCI_DATA_HDR_SIZE + data_len, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, NCI_DATA_HDR_SIZE); skb_put_data(skb, data, data_len); loopback_data.conn_id = conn_id; loopback_data.data = skb; ndev->cur_conn_id = conn_id; r = nci_request(ndev, nci_send_data_req, &loopback_data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK && resp) *resp = conn_info->rx_skb; return r; } EXPORT_SYMBOL(nci_nfcc_loopback); static int nci_open_device(struct nci_dev *ndev) { int rc = 0; mutex_lock(&ndev->req_lock); if (test_bit(NCI_UNREG, &ndev->flags)) { rc = -ENODEV; goto done; } if (test_bit(NCI_UP, &ndev->flags)) { rc = -EALREADY; goto done; } if (ndev->ops->open(ndev)) { rc = -EIO; goto done; } atomic_set(&ndev->cmd_cnt, 1); set_bit(NCI_INIT, &ndev->flags); if (ndev->ops->init) rc = ndev->ops->init(ndev); if (!rc) { rc = __nci_request(ndev, nci_reset_req, (void *)0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); } if (!rc && ndev->ops->setup) { rc = ndev->ops->setup(ndev); } if (!rc) { struct nci_core_init_v2_cmd nci_init_v2_cmd = { .feature1 = NCI_FEATURE_DISABLE, .feature2 = NCI_FEATURE_DISABLE }; const void *opt = NULL; if (ndev->nci_ver & NCI_VER_2_MASK) opt = &nci_init_v2_cmd; rc = __nci_request(ndev, nci_init_req, opt, msecs_to_jiffies(NCI_INIT_TIMEOUT)); } if (!rc && ndev->ops->post_setup) rc = ndev->ops->post_setup(ndev); if (!rc) { rc = __nci_request(ndev, nci_init_complete_req, (void *)0, msecs_to_jiffies(NCI_INIT_TIMEOUT)); } clear_bit(NCI_INIT, &ndev->flags); if (!rc) { set_bit(NCI_UP, &ndev->flags); nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); } else { /* Init failed, cleanup */ skb_queue_purge(&ndev->cmd_q); skb_queue_purge(&ndev->rx_q); skb_queue_purge(&ndev->tx_q); ndev->ops->close(ndev); ndev->flags &= BIT(NCI_UNREG); } done: mutex_unlock(&ndev->req_lock); return rc; } static int nci_close_device(struct nci_dev *ndev) { nci_req_cancel(ndev, ENODEV); /* This mutex needs to be held as a barrier for * caller nci_unregister_device */ mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { /* Need to flush the cmd wq in case * there is a queued/running cmd_work */ flush_workqueue(ndev->cmd_wq); del_timer_sync(&ndev->cmd_timer); del_timer_sync(&ndev->data_timer); mutex_unlock(&ndev->req_lock); return 0; } /* Drop RX and TX queues */ skb_queue_purge(&ndev->rx_q); skb_queue_purge(&ndev->tx_q); /* Flush RX and TX wq */ flush_workqueue(ndev->rx_wq); flush_workqueue(ndev->tx_wq); /* Reset device */ skb_queue_purge(&ndev->cmd_q); atomic_set(&ndev->cmd_cnt, 1); set_bit(NCI_INIT, &ndev->flags); __nci_request(ndev, nci_reset_req, (void *)0, msecs_to_jiffies(NCI_RESET_TIMEOUT)); /* After this point our queues are empty * and no works are scheduled. 
*/ ndev->ops->close(ndev); clear_bit(NCI_INIT, &ndev->flags); /* Flush cmd wq */ flush_workqueue(ndev->cmd_wq); del_timer_sync(&ndev->cmd_timer); /* Clear flags except NCI_UNREG */ ndev->flags &= BIT(NCI_UNREG); mutex_unlock(&ndev->req_lock); return 0; } /* NCI command timer function */ static void nci_cmd_timer(struct timer_list *t) { struct nci_dev *ndev = from_timer(ndev, t, cmd_timer); atomic_set(&ndev->cmd_cnt, 1); queue_work(ndev->cmd_wq, &ndev->cmd_work); } /* NCI data exchange timer function */ static void nci_data_timer(struct timer_list *t) { struct nci_dev *ndev = from_timer(ndev, t, data_timer); set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); queue_work(ndev->rx_wq, &ndev->rx_work); } static int nci_dev_up(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); return nci_open_device(ndev); } static int nci_dev_down(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); return nci_close_device(ndev); } int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, const __u8 *val) { struct nci_set_config_param param; if (!val || !len) return 0; param.id = id; param.len = len; param.val = val; return __nci_request(ndev, nci_set_config_req, &param, msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); } EXPORT_SYMBOL(nci_set_config); static void nci_nfcee_discover_req(struct nci_dev *ndev, const void *opt) { struct nci_nfcee_discover_cmd cmd; __u8 action = (unsigned long)opt; cmd.discovery_action = action; nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd); } int nci_nfcee_discover(struct nci_dev *ndev, u8 action) { unsigned long opt = action; return __nci_request(ndev, nci_nfcee_discover_req, (void *)opt, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } EXPORT_SYMBOL(nci_nfcee_discover); static void nci_nfcee_mode_set_req(struct nci_dev *ndev, const void *opt) { const struct nci_nfcee_mode_set_cmd *cmd = opt; nci_send_cmd(ndev, NCI_OP_NFCEE_MODE_SET_CMD, sizeof(struct nci_nfcee_mode_set_cmd), cmd); } int nci_nfcee_mode_set(struct nci_dev *ndev, u8 nfcee_id, u8 nfcee_mode) { struct nci_nfcee_mode_set_cmd cmd; cmd.nfcee_id = nfcee_id; cmd.nfcee_mode = nfcee_mode; return __nci_request(ndev, nci_nfcee_mode_set_req, &cmd, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } EXPORT_SYMBOL(nci_nfcee_mode_set); static void nci_core_conn_create_req(struct nci_dev *ndev, const void *opt) { const struct core_conn_create_data *data = opt; nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, data->length, data->cmd); } int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, u8 number_destination_params, size_t params_len, const struct core_conn_create_dest_spec_params *params) { int r; struct nci_core_conn_create_cmd *cmd; struct core_conn_create_data data; data.length = params_len + sizeof(struct nci_core_conn_create_cmd); cmd = kzalloc(data.length, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->destination_type = destination_type; cmd->number_destination_params = number_destination_params; data.cmd = cmd; if (params) { memcpy(cmd->params, params, params_len); if (params->length > 0) memcpy(&ndev->cur_params, &params->value[DEST_SPEC_PARAMS_ID_INDEX], sizeof(struct dest_spec_params)); else ndev->cur_params.id = 0; } else { ndev->cur_params.id = 0; } ndev->cur_dest_type = destination_type; r = __nci_request(ndev, nci_core_conn_create_req, &data, msecs_to_jiffies(NCI_CMD_TIMEOUT)); kfree(cmd); return r; } EXPORT_SYMBOL(nci_core_conn_create); static void nci_core_conn_close_req(struct nci_dev *ndev, const void *opt) { __u8 conn_id = (unsigned long)opt; nci_send_cmd(ndev, 
NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id); } int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id) { unsigned long opt = conn_id; ndev->cur_conn_id = conn_id; return __nci_request(ndev, nci_core_conn_close_req, (void *)opt, msecs_to_jiffies(NCI_CMD_TIMEOUT)); } EXPORT_SYMBOL(nci_core_conn_close); static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); struct nci_set_config_param param; int rc; param.val = nfc_get_local_general_bytes(nfc_dev, &param.len); if ((param.val == NULL) || (param.len == 0)) return 0; if (param.len > NFC_MAX_GT_LEN) return -EINVAL; param.id = NCI_PN_ATR_REQ_GEN_BYTES; rc = nci_request(ndev, nci_set_config_req, &param, msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); if (rc) return rc; param.id = NCI_LN_ATR_RES_GEN_BYTES; return nci_request(ndev, nci_set_config_req, &param, msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT)); } static int nci_set_listen_parameters(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; __u8 val; val = NCI_LA_SEL_INFO_NFC_DEP_MASK; rc = nci_set_config(ndev, NCI_LA_SEL_INFO, 1, &val); if (rc) return rc; val = NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK; rc = nci_set_config(ndev, NCI_LF_PROTOCOL_TYPE, 1, &val); if (rc) return rc; val = NCI_LF_CON_BITR_F_212 | NCI_LF_CON_BITR_F_424; return nci_set_config(ndev, NCI_LF_CON_BITR_F, 1, &val); } static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols, __u32 tm_protocols) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); struct nci_rf_discover_param param; int rc; if ((atomic_read(&ndev->state) == NCI_DISCOVERY) || (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) { pr_err("unable to start poll, since poll is already active\n"); return -EBUSY; } if (ndev->target_active_prot) { pr_err("there is an active target\n"); return -EBUSY; } if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) || (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) { pr_debug("target active or w4 select, implicitly deactivate\n"); rc = nci_request(ndev, nci_rf_deactivate_req, (void *)NCI_DEACTIVATE_TYPE_IDLE_MODE, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); if (rc) return -EBUSY; } if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) { rc = nci_set_local_general_bytes(nfc_dev); if (rc) { pr_err("failed to set local general bytes\n"); return rc; } } if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { rc = nci_set_listen_parameters(nfc_dev); if (rc) pr_err("failed to set listen parameters\n"); } param.im_protocols = im_protocols; param.tm_protocols = tm_protocols; rc = nci_request(ndev, nci_rf_discover_req, &param, msecs_to_jiffies(NCI_RF_DISC_TIMEOUT)); if (!rc) ndev->poll_prots = im_protocols; return rc; } static void nci_stop_poll(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if ((atomic_read(&ndev->state) != NCI_DISCOVERY) && (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) { pr_err("unable to stop poll, since poll is not active\n"); return; } nci_request(ndev, nci_rf_deactivate_req, (void *)NCI_DEACTIVATE_TYPE_IDLE_MODE, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } static int nci_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, __u32 protocol) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); struct nci_rf_discover_select_param param; const struct nfc_target *nci_target = NULL; int i; int rc = 0; pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol); if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) && (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) { 
pr_err("there is no available target to activate\n"); return -EINVAL; } if (ndev->target_active_prot) { pr_err("there is already an active target\n"); return -EBUSY; } for (i = 0; i < ndev->n_targets; i++) { if (ndev->targets[i].idx == target->idx) { nci_target = &ndev->targets[i]; break; } } if (!nci_target) { pr_err("unable to find the selected target\n"); return -EINVAL; } if (!(nci_target->supported_protocols & (1 << protocol))) { pr_err("target does not support the requested protocol 0x%x\n", protocol); return -EINVAL; } if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { param.rf_discovery_id = nci_target->logical_idx; if (protocol == NFC_PROTO_JEWEL) param.rf_protocol = NCI_RF_PROTOCOL_T1T; else if (protocol == NFC_PROTO_MIFARE) param.rf_protocol = NCI_RF_PROTOCOL_T2T; else if (protocol == NFC_PROTO_FELICA) param.rf_protocol = NCI_RF_PROTOCOL_T3T; else if (protocol == NFC_PROTO_ISO14443 || protocol == NFC_PROTO_ISO14443_B) param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP; else param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP; rc = nci_request(ndev, nci_rf_discover_select_req, &param, msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT)); } if (!rc) ndev->target_active_prot = protocol; return rc; } static void nci_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, __u8 mode) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); unsigned long nci_mode = NCI_DEACTIVATE_TYPE_IDLE_MODE; if (!ndev->target_active_prot) { pr_err("unable to deactivate target, no active target\n"); return; } ndev->target_active_prot = 0; switch (mode) { case NFC_TARGET_MODE_SLEEP: nci_mode = NCI_DEACTIVATE_TYPE_SLEEP_MODE; break; } if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) { nci_request(ndev, nci_rf_deactivate_req, (void *)nci_mode, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } } static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, __u8 comm_mode, __u8 *gb, size_t gb_len) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode); rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP); if (rc) return rc; rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb, ndev->remote_gb_len); if (!rc) rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE, NFC_RF_INITIATOR); return rc; } static int nci_dep_link_down(struct nfc_dev *nfc_dev) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; if (nfc_dev->rf_mode == NFC_RF_INITIATOR) { nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE); } else { if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE || atomic_read(&ndev->state) == NCI_DISCOVERY) { nci_request(ndev, nci_rf_deactivate_req, (void *)0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); } rc = nfc_tm_deactivated(nfc_dev); if (rc) pr_err("error when signaling tm deactivation\n"); } return 0; } static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; struct nci_conn_info *conn_info; conn_info = ndev->rf_conn_info; if (!conn_info) return -EPROTO; pr_debug("target_idx %d, len %d\n", target->idx, skb->len); if (!ndev->target_active_prot) { pr_err("unable to exchange data, no active target\n"); return -EINVAL; } if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) return -EBUSY; /* store cb and context to be used on receiving data */ conn_info->data_exchange_cb = cb; conn_info->data_exchange_cb_context = cb_context; rc = nci_send_data(ndev, 
NCI_STATIC_RF_CONN_ID, skb); if (rc) clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); return rc; } static int nci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc; rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); if (rc) pr_err("unable to send data\n"); return rc; } static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if (ndev->ops->enable_se) return ndev->ops->enable_se(ndev, se_idx); return 0; } static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if (ndev->ops->disable_se) return ndev->ops->disable_se(ndev, se_idx); return 0; } static int nci_discover_se(struct nfc_dev *nfc_dev) { int r; struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if (ndev->ops->discover_se) { r = nci_nfcee_discover(ndev, NCI_NFCEE_DISCOVERY_ACTION_ENABLE); if (r != NCI_STATUS_OK) return -EPROTO; return ndev->ops->discover_se(ndev); } return 0; } static int nci_se_io(struct nfc_dev *nfc_dev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if (ndev->ops->se_io) return ndev->ops->se_io(ndev, se_idx, apdu, apdu_length, cb, cb_context); return 0; } static int nci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name) { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); if (!ndev->ops->fw_download) return -ENOTSUPP; return ndev->ops->fw_download(ndev, firmware_name); } static const struct nfc_ops nci_nfc_ops = { .dev_up = nci_dev_up, .dev_down = nci_dev_down, .start_poll = nci_start_poll, .stop_poll = nci_stop_poll, .dep_link_up = nci_dep_link_up, .dep_link_down = nci_dep_link_down, .activate_target = nci_activate_target, .deactivate_target = nci_deactivate_target, .im_transceive = nci_transceive, .tm_send = nci_tm_send, .enable_se = nci_enable_se, .disable_se = nci_disable_se, .discover_se = nci_discover_se, .se_io = nci_se_io, .fw_download = nci_fw_download, }; /* ---- Interface to NCI drivers ---- */ /** * nci_allocate_device - allocate a new nci device * * @ops: device operations * @supported_protocols: NFC protocols supported by the device * @tx_headroom: Reserved space at beginning of skb * @tx_tailroom: Reserved space at end of skb */ struct nci_dev *nci_allocate_device(const struct nci_ops *ops, __u32 supported_protocols, int tx_headroom, int tx_tailroom) { struct nci_dev *ndev; pr_debug("supported_protocols 0x%x\n", supported_protocols); if (!ops->open || !ops->close || !ops->send) return NULL; if (!supported_protocols) return NULL; ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL); if (!ndev) return NULL; ndev->ops = ops; if (ops->n_prop_ops > NCI_MAX_PROPRIETARY_CMD) { pr_err("Too many proprietary commands: %zd\n", ops->n_prop_ops); goto free_nci; } ndev->tx_headroom = tx_headroom; ndev->tx_tailroom = tx_tailroom; init_completion(&ndev->req_completion); ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops, supported_protocols, tx_headroom + NCI_DATA_HDR_SIZE, tx_tailroom); if (!ndev->nfc_dev) goto free_nci; ndev->hci_dev = nci_hci_allocate(ndev); if (!ndev->hci_dev) goto free_nfc; nfc_set_drvdata(ndev->nfc_dev, ndev); return ndev; free_nfc: nfc_free_device(ndev->nfc_dev); free_nci: kfree(ndev); return NULL; } EXPORT_SYMBOL(nci_allocate_device); /** * nci_free_device - deallocate nci device * * @ndev: The nci device to deallocate */ void nci_free_device(struct nci_dev *ndev) { nfc_free_device(ndev->nfc_dev); nci_hci_deallocate(ndev); kfree(ndev); } 
EXPORT_SYMBOL(nci_free_device); /** * nci_register_device - register a nci device in the nfc subsystem * * @ndev: The nci device to register */ int nci_register_device(struct nci_dev *ndev) { int rc; struct device *dev = &ndev->nfc_dev->dev; char name[32]; ndev->flags = 0; INIT_WORK(&ndev->cmd_work, nci_cmd_work); snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev)); ndev->cmd_wq = create_singlethread_workqueue(name); if (!ndev->cmd_wq) { rc = -ENOMEM; goto exit; } INIT_WORK(&ndev->rx_work, nci_rx_work); snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev)); ndev->rx_wq = create_singlethread_workqueue(name); if (!ndev->rx_wq) { rc = -ENOMEM; goto destroy_cmd_wq_exit; } INIT_WORK(&ndev->tx_work, nci_tx_work); snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev)); ndev->tx_wq = create_singlethread_workqueue(name); if (!ndev->tx_wq) { rc = -ENOMEM; goto destroy_rx_wq_exit; } skb_queue_head_init(&ndev->cmd_q); skb_queue_head_init(&ndev->rx_q); skb_queue_head_init(&ndev->tx_q); timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0); timer_setup(&ndev->data_timer, nci_data_timer, 0); mutex_init(&ndev->req_lock); INIT_LIST_HEAD(&ndev->conn_info_list); rc = nfc_register_device(ndev->nfc_dev); if (rc) goto destroy_tx_wq_exit; goto exit; destroy_tx_wq_exit: destroy_workqueue(ndev->tx_wq); destroy_rx_wq_exit: destroy_workqueue(ndev->rx_wq); destroy_cmd_wq_exit: destroy_workqueue(ndev->cmd_wq); exit: return rc; } EXPORT_SYMBOL(nci_register_device); /** * nci_unregister_device - unregister a nci device in the nfc subsystem * * @ndev: The nci device to unregister */ void nci_unregister_device(struct nci_dev *ndev) { struct nci_conn_info *conn_info, *n; /* This set_bit is not protected with specialized barrier, * However, it is fine because the mutex_lock(&ndev->req_lock); * in nci_close_device() will help to emit one. */ set_bit(NCI_UNREG, &ndev->flags); nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); destroy_workqueue(ndev->rx_wq); destroy_workqueue(ndev->tx_wq); list_for_each_entry_safe(conn_info, n, &ndev->conn_info_list, list) { list_del(&conn_info->list); /* conn_info is allocated with devm_kzalloc */ } nfc_unregister_device(ndev->nfc_dev); } EXPORT_SYMBOL(nci_unregister_device); /** * nci_recv_frame - receive frame from NCI drivers * * @ndev: The nci device * @skb: The sk_buff to receive */ int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) { pr_debug("len %d\n", skb->len); if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && !test_bit(NCI_INIT, &ndev->flags))) { kfree_skb(skb); return -ENXIO; } /* Queue frame for rx worker thread */ skb_queue_tail(&ndev->rx_q, skb); queue_work(ndev->rx_wq, &ndev->rx_work); return 0; } EXPORT_SYMBOL(nci_recv_frame); int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb) { pr_debug("len %d\n", skb->len); if (!ndev) { kfree_skb(skb); return -ENODEV; } /* Get rid of skb owner, prior to sending to the driver. 
*/ skb_orphan(skb); /* Send copy to sniffer */ nfc_send_to_raw_sock(ndev->nfc_dev, skb, RAW_PAYLOAD_NCI, NFC_DIRECTION_TX); return ndev->ops->send(ndev, skb); } EXPORT_SYMBOL(nci_send_frame); /* Send NCI command */ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, const void *payload) { struct nci_ctrl_hdr *hdr; struct sk_buff *skb; pr_debug("opcode 0x%x, plen %d\n", opcode, plen); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL); if (!skb) { pr_err("no memory for command\n"); return -ENOMEM; } hdr = skb_put(skb, NCI_CTRL_HDR_SIZE); hdr->gid = nci_opcode_gid(opcode); hdr->oid = nci_opcode_oid(opcode); hdr->plen = plen; nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT); nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST); if (plen) skb_put_data(skb, payload, plen); skb_queue_tail(&ndev->cmd_q, skb); queue_work(ndev->cmd_wq, &ndev->cmd_work); return 0; } EXPORT_SYMBOL(nci_send_cmd); /* Proprietary commands API */ static const struct nci_driver_ops *ops_cmd_lookup(const struct nci_driver_ops *ops, size_t n_ops, __u16 opcode) { size_t i; const struct nci_driver_ops *op; if (!ops || !n_ops) return NULL; for (i = 0; i < n_ops; i++) { op = &ops[i]; if (op->opcode == opcode) return op; } return NULL; } static int nci_op_rsp_packet(struct nci_dev *ndev, __u16 rsp_opcode, struct sk_buff *skb, const struct nci_driver_ops *ops, size_t n_ops) { const struct nci_driver_ops *op; op = ops_cmd_lookup(ops, n_ops, rsp_opcode); if (!op || !op->rsp) return -ENOTSUPP; return op->rsp(ndev, skb); } static int nci_op_ntf_packet(struct nci_dev *ndev, __u16 ntf_opcode, struct sk_buff *skb, const struct nci_driver_ops *ops, size_t n_ops) { const struct nci_driver_ops *op; op = ops_cmd_lookup(ops, n_ops, ntf_opcode); if (!op || !op->ntf) return -ENOTSUPP; return op->ntf(ndev, skb); } int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb) { return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->prop_ops, ndev->ops->n_prop_ops); } int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb) { return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->prop_ops, ndev->ops->n_prop_ops); } int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb) { return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->core_ops, ndev->ops->n_core_ops); } int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb) { return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->core_ops, ndev->ops->n_core_ops); } /* ---- NCI TX Data worker thread ---- */ static void nci_tx_work(struct work_struct *work) { struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); struct nci_conn_info *conn_info; struct sk_buff *skb; conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_conn_id); if (!conn_info) return; pr_debug("credits_cnt %d\n", atomic_read(&conn_info->credits_cnt)); /* Send queued tx data */ while (atomic_read(&conn_info->credits_cnt)) { skb = skb_dequeue(&ndev->tx_q); if (!skb) return; kcov_remote_start_common(skb_get_kcov_handle(skb)); /* Check if data flow control is used */ if (atomic_read(&conn_info->credits_cnt) != NCI_DATA_FLOW_CONTROL_NOT_USED) atomic_dec(&conn_info->credits_cnt); pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", nci_pbf(skb->data), nci_conn_id(skb->data), nci_plen(skb->data)); nci_send_frame(ndev, skb); mod_timer(&ndev->data_timer, jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT)); kcov_remote_stop(); } } /* ----- NCI RX worker thread (data & control) ----- */ static void nci_rx_work(struct work_struct *work) { 
struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work); struct sk_buff *skb; for (; (skb = skb_dequeue(&ndev->rx_q)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); /* Send copy to sniffer */ nfc_send_to_raw_sock(ndev->nfc_dev, skb, RAW_PAYLOAD_NCI, NFC_DIRECTION_RX); /* Process frame */ switch (nci_mt(skb->data)) { case NCI_MT_RSP_PKT: nci_rsp_packet(ndev, skb); break; case NCI_MT_NTF_PKT: nci_ntf_packet(ndev, skb); break; case NCI_MT_DATA_PKT: nci_rx_data_packet(ndev, skb); break; default: pr_err("unknown MT 0x%x\n", nci_mt(skb->data)); kfree_skb(skb); break; } } /* check if a data exchange timeout has occurred */ if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) { /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, ndev->cur_conn_id, -ETIMEDOUT); clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); } } /* ----- NCI TX CMD worker thread ----- */ static void nci_cmd_work(struct work_struct *work) { struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work); struct sk_buff *skb; pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt)); /* Send queued command */ if (atomic_read(&ndev->cmd_cnt)) { skb = skb_dequeue(&ndev->cmd_q); if (!skb) return; kcov_remote_start_common(skb_get_kcov_handle(skb)); atomic_dec(&ndev->cmd_cnt); pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", nci_pbf(skb->data), nci_opcode_gid(nci_opcode(skb->data)), nci_opcode_oid(nci_opcode(skb->data)), nci_plen(skb->data)); nci_send_frame(ndev, skb); mod_timer(&ndev->cmd_timer, jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT)); kcov_remote_stop(); } } MODULE_LICENSE("GPL");
linux-master
net/nfc/nci/core.c
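core.c is the driver-facing surface of the NCI stack: a transport driver allocates a device with nci_allocate_device(), registers it with nci_register_device(), and feeds received frames back through nci_recv_frame(). The following skeleton is an illustrative sketch of that wiring, using the signatures shown above; every "acme" identifier is hypothetical.
/* Minimal sketch (not a real driver): using the core.c API.
 * All acme_* names are hypothetical.
 */
#include <net/nfc/nci_core.h>

static int acme_open(struct nci_dev *ndev)  { return 0; /* power up the link */ }
static int acme_close(struct nci_dev *ndev) { return 0; /* power it down */ }

static int acme_send(struct nci_dev *ndev, struct sk_buff *skb)
{
	/* push the fully built NCI frame onto the bus (UART/I2C/SPI/...) */
	consume_skb(skb);
	return 0;
}

static const struct nci_ops acme_nci_ops = {
	.open  = acme_open,
	.close = acme_close,
	.send  = acme_send,
};

static int acme_probe_sketch(void)
{
	struct nci_dev *ndev;
	int rc;

	ndev = nci_allocate_device(&acme_nci_ops, NFC_PROTO_ISO14443_MASK,
				   0 /* tx_headroom */, 0 /* tx_tailroom */);
	if (!ndev)
		return -ENOMEM;

	rc = nci_register_device(ndev);
	if (rc)
		nci_free_device(ndev);

	/* frames read from the bus are handed back with nci_recv_frame(ndev, skb) */
	return rc;
}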
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * This is the HCI over NCI implementation, as specified in the 10.2 * section of the NCI 1.1 specification. * * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. */ #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> #include <linux/kcov.h> struct nci_data { u8 conn_id; u8 pipe; u8 cmd; const u8 *data; u32 data_len; } __packed; struct nci_hci_create_pipe_params { u8 src_gate; u8 dest_host; u8 dest_gate; } __packed; struct nci_hci_create_pipe_resp { u8 src_host; u8 src_gate; u8 dest_host; u8 dest_gate; u8 pipe; } __packed; struct nci_hci_delete_pipe_noti { u8 pipe; } __packed; struct nci_hci_all_pipe_cleared_noti { u8 host; } __packed; struct nci_hcp_message { u8 header; /* type -cmd,evt,rsp- + instruction */ u8 data[]; } __packed; struct nci_hcp_packet { u8 header; /* cbit+pipe */ struct nci_hcp_message message; } __packed; #define NCI_HCI_ANY_SET_PARAMETER 0x01 #define NCI_HCI_ANY_GET_PARAMETER 0x02 #define NCI_HCI_ANY_CLOSE_PIPE 0x04 #define NCI_HCI_ADM_CLEAR_ALL_PIPE 0x14 #define NCI_HFP_NO_CHAINING 0x80 #define NCI_NFCEE_ID_HCI 0x80 #define NCI_EVT_HOT_PLUG 0x03 #define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01 #define NCI_HCI_ADM_CREATE_PIPE 0x10 #define NCI_HCI_ADM_DELETE_PIPE 0x11 /* HCP headers */ #define NCI_HCI_HCP_PACKET_HEADER_LEN 1 #define NCI_HCI_HCP_MESSAGE_HEADER_LEN 1 #define NCI_HCI_HCP_HEADER_LEN 2 /* HCP types */ #define NCI_HCI_HCP_COMMAND 0x00 #define NCI_HCI_HCP_EVENT 0x01 #define NCI_HCI_HCP_RESPONSE 0x02 #define NCI_HCI_ADM_NOTIFY_PIPE_CREATED 0x12 #define NCI_HCI_ADM_NOTIFY_PIPE_DELETED 0x13 #define NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15 #define NCI_HCI_FRAGMENT 0x7f #define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) |\ ((instr) & 0x3f)) #define NCI_HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6) #define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f) #define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f) static int nci_hci_result_to_errno(u8 result) { switch (result) { case NCI_HCI_ANY_OK: return 0; case NCI_HCI_ANY_E_REG_PAR_UNKNOWN: return -EOPNOTSUPP; case NCI_HCI_ANY_E_TIMEOUT: return -ETIME; default: return -1; } } /* HCI core */ static void nci_hci_reset_pipes(struct nci_hci_dev *hdev) { int i; for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { hdev->pipes[i].gate = NCI_HCI_INVALID_GATE; hdev->pipes[i].host = NCI_HCI_INVALID_HOST; } memset(hdev->gate2pipe, NCI_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); } static void nci_hci_reset_pipes_per_host(struct nci_dev *ndev, u8 host) { int i; for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { if (ndev->hci_dev->pipes[i].host == host) { ndev->hci_dev->pipes[i].gate = NCI_HCI_INVALID_GATE; ndev->hci_dev->pipes[i].host = NCI_HCI_INVALID_HOST; } } } /* Fragment HCI data over NCI packet. * NFC Forum NCI 10.2.2 Data Exchange: * The payload of the Data Packets sent on the Logical Connection SHALL be * valid HCP packets, as defined within [ETSI_102622]. Each Data Packet SHALL * contain a single HCP packet. NCI Segmentation and Reassembly SHALL NOT be * applied to Data Messages in either direction. The HCI fragmentation mechanism * is used if required. 
*/ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe, const u8 data_type, const u8 *data, size_t data_len) { const struct nci_conn_info *conn_info; struct sk_buff *skb; int len, i, r; u8 cb = pipe; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; i = 0; skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, NCI_DATA_HDR_SIZE + 2); *(u8 *)skb_push(skb, 1) = data_type; do { /* If last packet add NCI_HFP_NO_CHAINING */ if (i + conn_info->max_pkt_payload_len - (skb->len + 1) >= data_len) { cb |= NCI_HFP_NO_CHAINING; len = data_len - i; } else { len = conn_info->max_pkt_payload_len - skb->len - 1; } *(u8 *)skb_push(skb, 1) = cb; if (len > 0) skb_put_data(skb, data + i, len); r = nci_send_data(ndev, conn_info->conn_id, skb); if (r < 0) return r; i += len; if (i < data_len) { skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, NCI_DATA_HDR_SIZE + 1); } } while (i < data_len); return i; } static void nci_hci_send_data_req(struct nci_dev *ndev, const void *opt) { const struct nci_data *data = opt; nci_hci_send_data(ndev, data->pipe, data->cmd, data->data, data->data_len); } int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, const u8 *param, size_t param_len) { u8 pipe = ndev->hci_dev->gate2pipe[gate]; if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nci_hci_send_data(ndev, pipe, NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event), param, param_len); } EXPORT_SYMBOL(nci_hci_send_event); int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 pipe = ndev->hci_dev->gate2pipe[gate]; if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, cmd); data.data = param; data.data_len = param_len; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); if (!r && skb) *skb = conn_info->rx_skb; } return r; } EXPORT_SYMBOL(nci_hci_send_cmd); int nci_hci_clear_all_pipes(struct nci_dev *ndev) { int r; r = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_CLEAR_ALL_PIPE, NULL, 0, NULL); if (r < 0) return r; nci_hci_reset_pipes(ndev->hci_dev); return r; } EXPORT_SYMBOL(nci_hci_clear_all_pipes); static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe, u8 event, struct sk_buff *skb) { if (ndev->ops->hci_event_received) ndev->ops->hci_event_received(ndev, pipe, event, skb); } static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb) { u8 gate = ndev->hci_dev->pipes[pipe].gate; u8 status = NCI_HCI_ANY_OK | ~NCI_HCI_FRAGMENT; u8 dest_gate, new_pipe; struct nci_hci_create_pipe_resp *create_info; struct nci_hci_delete_pipe_noti *delete_info; struct nci_hci_all_pipe_cleared_noti *cleared_info; pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd); switch (cmd) { case NCI_HCI_ADM_NOTIFY_PIPE_CREATED: if (skb->len != 5) { status = NCI_HCI_ANY_E_NOK; goto exit; } create_info = (struct 
nci_hci_create_pipe_resp *)skb->data; dest_gate = create_info->dest_gate; new_pipe = create_info->pipe; if (new_pipe >= NCI_HCI_MAX_PIPES) { status = NCI_HCI_ANY_E_NOK; goto exit; } /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we * are the destination and it is our local gate */ ndev->hci_dev->gate2pipe[dest_gate] = new_pipe; ndev->hci_dev->pipes[new_pipe].gate = dest_gate; ndev->hci_dev->pipes[new_pipe].host = create_info->src_host; break; case NCI_HCI_ANY_OPEN_PIPE: /* If the pipe is not created report an error */ if (gate == NCI_HCI_INVALID_GATE) { status = NCI_HCI_ANY_E_NOK; goto exit; } break; case NCI_HCI_ADM_NOTIFY_PIPE_DELETED: if (skb->len != 1) { status = NCI_HCI_ANY_E_NOK; goto exit; } delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; if (delete_info->pipe >= NCI_HCI_MAX_PIPES) { status = NCI_HCI_ANY_E_NOK; goto exit; } ndev->hci_dev->pipes[delete_info->pipe].gate = NCI_HCI_INVALID_GATE; ndev->hci_dev->pipes[delete_info->pipe].host = NCI_HCI_INVALID_HOST; break; case NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED: if (skb->len != 1) { status = NCI_HCI_ANY_E_NOK; goto exit; } cleared_info = (struct nci_hci_all_pipe_cleared_noti *)skb->data; nci_hci_reset_pipes_per_host(ndev, cleared_info->host); break; default: pr_debug("Discarded unknown cmd %x to gate %x\n", cmd, gate); break; } if (ndev->ops->hci_cmd_received) ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb); exit: nci_hci_send_data(ndev, pipe, status, NULL, 0); kfree_skb(skb); } static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe, struct sk_buff *skb) { struct nci_conn_info *conn_info; conn_info = ndev->hci_dev->conn_info; if (!conn_info) goto exit; conn_info->rx_skb = skb; exit: nci_req_complete(ndev, NCI_STATUS_OK); } /* Receive hcp message for pipe, with type and cmd. * skb contains optional message data only. 
*/ static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe, u8 type, u8 instruction, struct sk_buff *skb) { switch (type) { case NCI_HCI_HCP_RESPONSE: nci_hci_resp_received(ndev, pipe, skb); break; case NCI_HCI_HCP_COMMAND: nci_hci_cmd_received(ndev, pipe, instruction, skb); break; case NCI_HCI_HCP_EVENT: nci_hci_event_received(ndev, pipe, instruction, skb); break; default: pr_err("UNKNOWN MSG Type %d, instruction=%d\n", type, instruction); kfree_skb(skb); break; } nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_hci_msg_rx_work(struct work_struct *work) { struct nci_hci_dev *hdev = container_of(work, struct nci_hci_dev, msg_rx_work); struct sk_buff *skb; const struct nci_hcp_message *message; u8 pipe, type, instruction; for (; (skb = skb_dequeue(&hdev->msg_rx_queue)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]); skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN); message = (struct nci_hcp_message *)skb->data; type = NCI_HCP_MSG_GET_TYPE(message->header); instruction = NCI_HCP_MSG_GET_CMD(message->header); skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); nci_hci_hcp_message_rx(hdev->ndev, pipe, type, instruction, skb); } } void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err) { struct nci_dev *ndev = (struct nci_dev *)context; struct nci_hcp_packet *packet; u8 pipe, type; struct sk_buff *hcp_skb; struct sk_buff *frag_skb; int msg_len; if (err) { nci_req_complete(ndev, err); return; } packet = (struct nci_hcp_packet *)skb->data; if ((packet->header & ~NCI_HCI_FRAGMENT) == 0) { skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); return; } /* it's the last fragment. Does it need re-aggregation? */ if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) { pipe = NCI_HCP_MSG_GET_PIPE(packet->header); skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); msg_len = 0; skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { msg_len += (frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN); } hcp_skb = nfc_alloc_recv_skb(NCI_HCI_HCP_PACKET_HEADER_LEN + msg_len, GFP_KERNEL); if (!hcp_skb) { nci_req_complete(ndev, -ENOMEM); return; } skb_put_u8(hcp_skb, pipe); skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN; skb_put_data(hcp_skb, frag_skb->data + NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len); } skb_queue_purge(&ndev->hci_dev->rx_hcp_frags); } else { packet->header &= NCI_HCI_FRAGMENT; hcp_skb = skb; } /* if this is a response, dispatch immediately to * unblock waiting cmd context. Otherwise, enqueue to dispatch * in separate context where handler can also execute command. 
*/ packet = (struct nci_hcp_packet *)hcp_skb->data; type = NCI_HCP_MSG_GET_TYPE(packet->message.header); if (type == NCI_HCI_HCP_RESPONSE) { pipe = NCI_HCP_MSG_GET_PIPE(packet->header); skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN); nci_hci_hcp_message_rx(ndev, pipe, type, NCI_STATUS_OK, hcp_skb); } else { skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb); schedule_work(&ndev->hci_dev->msg_rx_work); } } int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe) { struct nci_data data; const struct nci_conn_info *conn_info; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_OPEN_PIPE); data.data = NULL; data.data_len = 0; return nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); } EXPORT_SYMBOL(nci_hci_open_pipe); static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host, u8 dest_gate, int *result) { u8 pipe; struct sk_buff *skb; struct nci_hci_create_pipe_params params; const struct nci_hci_create_pipe_resp *resp; pr_debug("gate=%d\n", dest_gate); params.src_gate = NCI_HCI_ADMIN_GATE; params.dest_host = dest_host; params.dest_gate = dest_gate; *result = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_CREATE_PIPE, (u8 *)&params, sizeof(params), &skb); if (*result < 0) return NCI_HCI_INVALID_PIPE; resp = (struct nci_hci_create_pipe_resp *)skb->data; pipe = resp->pipe; kfree_skb(skb); pr_debug("pipe created=%d\n", pipe); return pipe; } static int nci_hci_delete_pipe(struct nci_dev *ndev, u8 pipe) { return nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx, const u8 *param, size_t param_len) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 *tmp; u8 pipe = ndev->hci_dev->gate2pipe[gate]; pr_debug("idx=%d to gate %d\n", idx, gate); if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; tmp = kmalloc(1 + param_len, GFP_KERNEL); if (!tmp) return -ENOMEM; *tmp = idx; memcpy(tmp + 1, param, param_len); data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_SET_PARAMETER); data.data = tmp; data.data_len = param_len + 1; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); } kfree(tmp); return r; } EXPORT_SYMBOL(nci_hci_set_param); int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, struct sk_buff **skb) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 pipe = ndev->hci_dev->gate2pipe[gate]; pr_debug("idx=%d to gate %d\n", idx, gate); if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_GET_PARAMETER); data.data = &idx; data.data_len = 1; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( 
NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); if (!r && skb) *skb = conn_info->rx_skb; } return r; } EXPORT_SYMBOL(nci_hci_get_param); int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host, u8 dest_gate, u8 pipe) { bool pipe_created = false; int r; if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE) return 0; if (ndev->hci_dev->gate2pipe[dest_gate] != NCI_HCI_INVALID_PIPE) return -EADDRINUSE; if (pipe != NCI_HCI_INVALID_PIPE) goto open_pipe; switch (dest_gate) { case NCI_HCI_LINK_MGMT_GATE: pipe = NCI_HCI_LINK_MGMT_PIPE; break; case NCI_HCI_ADMIN_GATE: pipe = NCI_HCI_ADMIN_PIPE; break; default: pipe = nci_hci_create_pipe(ndev, dest_host, dest_gate, &r); if (pipe == NCI_HCI_INVALID_PIPE) return r; pipe_created = true; break; } open_pipe: r = nci_hci_open_pipe(ndev, pipe); if (r < 0) { if (pipe_created) { if (nci_hci_delete_pipe(ndev, pipe) < 0) { /* TODO: Cannot clean by deleting pipe... * -> inconsistent state */ } } return r; } ndev->hci_dev->pipes[pipe].gate = dest_gate; ndev->hci_dev->pipes[pipe].host = dest_host; ndev->hci_dev->gate2pipe[dest_gate] = pipe; return 0; } EXPORT_SYMBOL(nci_hci_connect_gate); static int nci_hci_dev_connect_gates(struct nci_dev *ndev, u8 gate_count, const struct nci_hci_gate *gates) { int r; while (gate_count--) { r = nci_hci_connect_gate(ndev, gates->dest_host, gates->gate, gates->pipe); if (r < 0) return r; gates++; } return 0; } int nci_hci_dev_session_init(struct nci_dev *ndev) { struct nci_conn_info *conn_info; struct sk_buff *skb; int r; ndev->hci_dev->count_pipes = 0; ndev->hci_dev->expected_pipes = 0; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; conn_info->data_exchange_cb = nci_hci_data_received_cb; conn_info->data_exchange_cb_context = ndev; nci_hci_reset_pipes(ndev->hci_dev); if (ndev->hci_dev->init_data.gates[0].gate != NCI_HCI_ADMIN_GATE) return -EPROTO; r = nci_hci_connect_gate(ndev, ndev->hci_dev->init_data.gates[0].dest_host, ndev->hci_dev->init_data.gates[0].gate, ndev->hci_dev->init_data.gates[0].pipe); if (r < 0) return r; r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb); if (r < 0) return r; if (skb->len && skb->len == strlen(ndev->hci_dev->init_data.session_id) && !memcmp(ndev->hci_dev->init_data.session_id, skb->data, skb->len) && ndev->ops->hci_load_session) { /* Restore gate<->pipe table from some proprietary location. */ r = ndev->ops->hci_load_session(ndev); } else { r = nci_hci_clear_all_pipes(ndev); if (r < 0) goto exit; r = nci_hci_dev_connect_gates(ndev, ndev->hci_dev->init_data.gate_count, ndev->hci_dev->init_data.gates); if (r < 0) goto exit; r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, ndev->hci_dev->init_data.session_id, strlen(ndev->hci_dev->init_data.session_id)); } exit: kfree_skb(skb); return r; } EXPORT_SYMBOL(nci_hci_dev_session_init); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev) { struct nci_hci_dev *hdev; hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); if (!hdev) return NULL; skb_queue_head_init(&hdev->rx_hcp_frags); INIT_WORK(&hdev->msg_rx_work, nci_hci_msg_rx_work); skb_queue_head_init(&hdev->msg_rx_queue); hdev->ndev = ndev; return hdev; } void nci_hci_deallocate(struct nci_dev *ndev) { kfree(ndev->hci_dev); }
linux-master
net/nfc/nci/hci.c
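hci.c packs each HCP message header into a single byte: a 2-bit type in the top bits and a 6-bit instruction in the low bits, while the packet header carries the chaining bit plus the pipe id. The short sketch below only illustrates that bit layout; the macros are file-local in hci.c, so they are repeated here to keep the example self-contained, and the instruction value 0x03 (ANY_OPEN_PIPE) is used purely as an example.
/* Illustration of the HCP header bit layout used by hci.c above. */
#define NCI_HCP_HEADER(type, instr)	((((type) & 0x03) << 6) | ((instr) & 0x3f))
#define NCI_HCP_MSG_GET_TYPE(header)	(((header) & 0xc0) >> 6)
#define NCI_HCP_MSG_GET_CMD(header)	((header) & 0x3f)

static void hcp_header_example(void)
{
	/* a command (type 0x00) carrying instruction 0x03 (ANY_OPEN_PIPE) */
	unsigned char hdr = NCI_HCP_HEADER(0x00, 0x03);		/* == 0x03 */

	/* the receive side recovers both fields from the same byte */
	unsigned char type  = NCI_HCP_MSG_GET_TYPE(hdr);	/* 0x00 -> command */
	unsigned char instr = NCI_HCP_MSG_GET_CMD(hdr);		/* 0x03 */

	(void)type;
	(void)instr;
}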
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2014 Marvell International Ltd. * * Written by Ilan Elias <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> /* Complete data exchange transaction and forward skb to nfc core */ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, __u8 conn_id, int err) { const struct nci_conn_info *conn_info; data_exchange_cb_t cb; void *cb_context; conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); if (!conn_info) { kfree_skb(skb); goto exit; } cb = conn_info->data_exchange_cb; cb_context = conn_info->data_exchange_cb_context; pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); /* data exchange is complete, stop the data timer */ del_timer_sync(&ndev->data_timer); clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); if (cb) { /* forward skb to nfc core */ cb(cb_context, skb, err); } else if (skb) { pr_err("no rx callback, dropping rx data...\n"); /* no waiting callback, free skb */ kfree_skb(skb); } exit: clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); } /* ----------------- NCI TX Data ----------------- */ static inline void nci_push_data_hdr(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb, __u8 pbf) { struct nci_data_hdr *hdr; int plen = skb->len; hdr = skb_push(skb, NCI_DATA_HDR_SIZE); hdr->conn_id = conn_id; hdr->rfu = 0; hdr->plen = plen; nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT); nci_pbf_set((__u8 *)hdr, pbf); } int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id) { const struct nci_conn_info *conn_info; conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); if (!conn_info) return -EPROTO; return conn_info->max_pkt_payload_len; } EXPORT_SYMBOL(nci_conn_max_data_pkt_payload_size); static int nci_queue_tx_data_frags(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) { const struct nci_conn_info *conn_info; int total_len = skb->len; const unsigned char *data = skb->data; unsigned long flags; struct sk_buff_head frags_q; struct sk_buff *skb_frag; int frag_len; int rc = 0; pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len); conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id); if (!conn_info) { rc = -EPROTO; goto exit; } __skb_queue_head_init(&frags_q); while (total_len) { frag_len = min_t(int, total_len, conn_info->max_pkt_payload_len); skb_frag = nci_skb_alloc(ndev, (NCI_DATA_HDR_SIZE + frag_len), GFP_ATOMIC); if (skb_frag == NULL) { rc = -ENOMEM; goto free_exit; } skb_reserve(skb_frag, NCI_DATA_HDR_SIZE); /* first, copy the data */ skb_put_data(skb_frag, data, frag_len); /* second, set the header */ nci_push_data_hdr(ndev, conn_id, skb_frag, ((total_len == frag_len) ? 
					   (NCI_PBF_LAST) : (NCI_PBF_CONT)));

		__skb_queue_tail(&frags_q, skb_frag);

		data += frag_len;
		total_len -= frag_len;

		pr_debug("frag_len %d, remaining total_len %d\n",
			 frag_len, total_len);
	}

	/* queue all fragments atomically */
	spin_lock_irqsave(&ndev->tx_q.lock, flags);

	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		__skb_queue_tail(&ndev->tx_q, skb_frag);

	spin_unlock_irqrestore(&ndev->tx_q.lock, flags);

	/* free the original skb */
	kfree_skb(skb);

	goto exit;

free_exit:
	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		kfree_skb(skb_frag);

exit:
	return rc;
}

/* Send NCI data */
int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
{
	const struct nci_conn_info *conn_info;
	int rc = 0;

	pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);

	conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
	if (!conn_info) {
		rc = -EPROTO;
		goto free_exit;
	}

	/* check if the packet need to be fragmented */
	if (skb->len <= conn_info->max_pkt_payload_len) {
		/* no need to fragment packet */
		nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);

		skb_queue_tail(&ndev->tx_q, skb);
	} else {
		/* fragment packet and queue the fragments */
		rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
		if (rc) {
			pr_err("failed to fragment tx data packet\n");
			goto free_exit;
		}
	}

	ndev->cur_conn_id = conn_id;
	queue_work(ndev->tx_wq, &ndev->tx_work);

	goto exit;

free_exit:
	kfree_skb(skb);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_send_data);

/* ----------------- NCI RX Data ----------------- */

static void nci_add_rx_data_frag(struct nci_dev *ndev,
				 struct sk_buff *skb,
				 __u8 pbf, __u8 conn_id, __u8 status)
{
	int reassembly_len;
	int err = 0;

	if (status) {
		err = status;
		goto exit;
	}

	if (ndev->rx_data_reassembly) {
		reassembly_len = ndev->rx_data_reassembly->len;

		/* first, make enough room for the already accumulated data */
		if (skb_cow_head(skb, reassembly_len)) {
			pr_err("error adding room for accumulated rx data\n");

			kfree_skb(skb);
			skb = NULL;

			kfree_skb(ndev->rx_data_reassembly);
			ndev->rx_data_reassembly = NULL;

			err = -ENOMEM;
			goto exit;
		}

		/* second, combine the two fragments */
		memcpy(skb_push(skb, reassembly_len),
		       ndev->rx_data_reassembly->data,
		       reassembly_len);

		/* third, free old reassembly */
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	if (pbf == NCI_PBF_CONT) {
		/* need to wait for next fragment, store skb and exit */
		ndev->rx_data_reassembly = skb;
		return;
	}

exit:
	if (ndev->nfc_dev->rf_mode == NFC_RF_TARGET) {
		/* Data received in Target mode, forward to nfc core */
		err = nfc_tm_data_received(ndev->nfc_dev, skb);
		if (err)
			pr_err("unable to handle received data\n");
	} else {
		nci_data_exchange_complete(ndev, skb, conn_id, err);
	}
}

/* Rx Data packet */
void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
{
	__u8 pbf = nci_pbf(skb->data);
	__u8 status = 0;
	__u8 conn_id = nci_conn_id(skb->data);
	const struct nci_conn_info *conn_info;

	pr_debug("len %d\n", skb->len);

	pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
		 nci_pbf(skb->data),
		 nci_conn_id(skb->data),
		 nci_plen(skb->data));

	conn_info = nci_get_conn_info_by_conn_id(ndev, nci_conn_id(skb->data));
	if (!conn_info) {
		kfree_skb(skb);
		return;
	}

	/* strip the nci data header */
	skb_pull(skb, NCI_DATA_HDR_SIZE);

	if (ndev->target_active_prot == NFC_PROTO_MIFARE ||
	    ndev->target_active_prot == NFC_PROTO_JEWEL ||
	    ndev->target_active_prot == NFC_PROTO_FELICA ||
	    ndev->target_active_prot == NFC_PROTO_ISO15693) {
		/* frame I/F => remove the status byte */
		pr_debug("frame I/F => remove the status byte\n");
		status = skb->data[skb->len - 1];
		skb_trim(skb, (skb->len - 1));
	}

	nci_add_rx_data_frag(ndev, skb, pbf, conn_id, nci_to_errno(status));
}
linux-master
net/nfc/nci/data.c
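The fragmentation rule used by nci_send_data() and nci_queue_tx_data_frags() above can be illustrated outside the kernel: a payload larger than the connection's max_pkt_payload_len is cut into max-sized chunks, and every chunk except the last one carries the continuation flag. The following is a minimal userspace-style sketch of that loop, not kernel code; the sizes and the show_fragments()/main() helpers are illustrative only.

#include <stdio.h>

#define NCI_PBF_LAST	0
#define NCI_PBF_CONT	1

/* Illustrative only: split a payload of total_len bytes into fragments of at
 * most max_payload bytes, marking every fragment except the last one with the
 * continuation bit, mirroring the loop in nci_queue_tx_data_frags().
 */
static void show_fragments(int total_len, int max_payload)
{
	int offset = 0;

	while (total_len > 0) {
		int frag_len = total_len > max_payload ? max_payload : total_len;
		int pbf = (total_len == frag_len) ? NCI_PBF_LAST : NCI_PBF_CONT;

		printf("fragment at %d, len %d, pbf %s\n", offset, frag_len,
		       pbf == NCI_PBF_CONT ? "CONT" : "LAST");

		offset += frag_len;
		total_len -= frag_len;
	}
}

int main(void)
{
	/* e.g. a 700-byte payload with a 255-byte max payload -> 3 fragments */
	show_fragments(700, 255);
	return 0;
}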
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2015, Marvell International Ltd. * * Inspired (hugely) by HCI LDISC implementation in Bluetooth. * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2004-2005 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> /* TX states */ #define NCI_UART_SENDING 1 #define NCI_UART_TX_WAKEUP 2 static struct nci_uart *nci_uart_drivers[NCI_UART_DRIVER_MAX]; static inline struct sk_buff *nci_uart_dequeue(struct nci_uart *nu) { struct sk_buff *skb = nu->tx_skb; if (!skb) skb = skb_dequeue(&nu->tx_q); else nu->tx_skb = NULL; return skb; } static inline int nci_uart_queue_empty(struct nci_uart *nu) { if (nu->tx_skb) return 0; return skb_queue_empty(&nu->tx_q); } static int nci_uart_tx_wakeup(struct nci_uart *nu) { if (test_and_set_bit(NCI_UART_SENDING, &nu->tx_state)) { set_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); return 0; } schedule_work(&nu->write_work); return 0; } static void nci_uart_write_work(struct work_struct *work) { struct nci_uart *nu = container_of(work, struct nci_uart, write_work); struct tty_struct *tty = nu->tty; struct sk_buff *skb; restart: clear_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); if (nu->ops.tx_start) nu->ops.tx_start(nu); while ((skb = nci_uart_dequeue(nu))) { int len; set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); len = tty->ops->write(tty, skb->data, skb->len); skb_pull(skb, len); if (skb->len) { nu->tx_skb = skb; break; } kfree_skb(skb); } if (test_bit(NCI_UART_TX_WAKEUP, &nu->tx_state)) goto restart; if (nu->ops.tx_done && nci_uart_queue_empty(nu)) nu->ops.tx_done(nu); clear_bit(NCI_UART_SENDING, &nu->tx_state); } static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver) { struct nci_uart *nu = NULL; int ret; if (driver >= NCI_UART_DRIVER_MAX) return -EINVAL; if (!nci_uart_drivers[driver]) return -ENOENT; nu = kzalloc(sizeof(*nu), GFP_KERNEL); if (!nu) return -ENOMEM; memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart)); nu->tty = tty; tty->disc_data = nu; skb_queue_head_init(&nu->tx_q); INIT_WORK(&nu->write_work, nci_uart_write_work); spin_lock_init(&nu->rx_lock); ret = nu->ops.open(nu); if (ret) { tty->disc_data = NULL; kfree(nu); } else if (!try_module_get(nu->owner)) { nu->ops.close(nu); tty->disc_data = NULL; kfree(nu); return -ENOENT; } return ret; } /* ------ LDISC part ------ */ /* nci_uart_tty_open * * Called when line discipline changed to NCI_UART. * * Arguments: * tty pointer to tty info structure * Return Value: * 0 if success, otherwise error code */ static int nci_uart_tty_open(struct tty_struct *tty) { /* Error if the tty has no write op instead of leaving an exploitable * hole */ if (!tty->ops->write) return -EOPNOTSUPP; tty->disc_data = NULL; tty->receive_room = 65536; /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); return 0; } /* nci_uart_tty_close() * * Called when the line discipline is changed to something * else, the tty is closed, or the tty detects a hangup. 
*/ static void nci_uart_tty_close(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; /* Detach from the tty */ tty->disc_data = NULL; if (!nu) return; kfree_skb(nu->tx_skb); kfree_skb(nu->rx_skb); skb_queue_purge(&nu->tx_q); nu->ops.close(nu); nu->tty = NULL; module_put(nu->owner); cancel_work_sync(&nu->write_work); kfree(nu); } /* nci_uart_tty_wakeup() * * Callback for transmit wakeup. Called when low level * device driver can accept more send data. * * Arguments: tty pointer to associated tty instance data * Return Value: None */ static void nci_uart_tty_wakeup(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; if (!nu) return; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); if (tty != nu->tty) return; nci_uart_tx_wakeup(nu); } /* -- Default recv_buf handler -- * * This handler supposes that NCI frames are sent over UART link without any * framing. It reads NCI header, retrieve the packet size and once all packet * bytes are received it passes it to nci_uart driver for processing. */ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data, int count) { int chunk_len; if (!nu->ndev) { nfc_err(nu->tty->dev, "receive data from tty but no NCI dev is attached yet, drop buffer\n"); return 0; } /* Decode all incoming data in packets * and enqueue then for processing. */ while (count > 0) { /* If this is the first data of a packet, allocate a buffer */ if (!nu->rx_skb) { nu->rx_packet_len = -1; nu->rx_skb = nci_skb_alloc(nu->ndev, NCI_MAX_PACKET_SIZE, GFP_ATOMIC); if (!nu->rx_skb) return -ENOMEM; } /* Eat byte after byte till full packet header is received */ if (nu->rx_skb->len < NCI_CTRL_HDR_SIZE) { skb_put_u8(nu->rx_skb, *data++); --count; continue; } /* Header was received but packet len was not read */ if (nu->rx_packet_len < 0) nu->rx_packet_len = NCI_CTRL_HDR_SIZE + nci_plen(nu->rx_skb->data); /* Compute how many bytes are missing and how many bytes can * be consumed. */ chunk_len = nu->rx_packet_len - nu->rx_skb->len; if (count < chunk_len) chunk_len = count; skb_put_data(nu->rx_skb, data, chunk_len); data += chunk_len; count -= chunk_len; /* Check if packet is fully received */ if (nu->rx_packet_len == nu->rx_skb->len) { /* Pass RX packet to driver */ if (nu->ops.recv(nu, nu->rx_skb) != 0) nfc_err(nu->tty->dev, "corrupted RX packet\n"); /* Next packet will be a new one */ nu->rx_skb = NULL; } } return 0; } /* nci_uart_tty_receive() * * Called by tty low level driver when receive data is * available. * * Arguments: tty pointer to tty instance data * data pointer to received data * flags pointer to flags for data * count count of received data in bytes * * Return Value: None */ static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data, const u8 *flags, size_t count) { struct nci_uart *nu = tty->disc_data; if (!nu || tty != nu->tty) return; spin_lock(&nu->rx_lock); nci_uart_default_recv_buf(nu, data, count); spin_unlock(&nu->rx_lock); tty_unthrottle(tty); } /* nci_uart_tty_ioctl() * * Process IOCTL system call for the tty device. 
* * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg argument for IOCTL call (cmd dependent) * * Return Value: Command dependent */ static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct nci_uart *nu = tty->disc_data; int err = 0; switch (cmd) { case NCIUARTSETDRIVER: if (!nu) return nci_uart_set_driver(tty, (unsigned int)arg); else return -EBUSY; break; default: err = n_tty_ioctl_helper(tty, cmd, arg); break; } return err; } /* We don't provide read/write/poll interface for user space. */ static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return 0; } static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file, const u8 *data, size_t count) { return 0; } static int nci_uart_send(struct nci_uart *nu, struct sk_buff *skb) { /* Queue TX packet */ skb_queue_tail(&nu->tx_q, skb); /* Try to start TX (if possible) */ nci_uart_tx_wakeup(nu); return 0; } int nci_uart_register(struct nci_uart *nu) { if (!nu || !nu->ops.open || !nu->ops.recv || !nu->ops.close) return -EINVAL; /* Set the send callback */ nu->ops.send = nci_uart_send; /* Add this driver in the driver list */ if (nci_uart_drivers[nu->driver]) { pr_err("driver %d is already registered\n", nu->driver); return -EBUSY; } nci_uart_drivers[nu->driver] = nu; pr_info("NCI uart driver '%s [%d]' registered\n", nu->name, nu->driver); return 0; } EXPORT_SYMBOL_GPL(nci_uart_register); void nci_uart_unregister(struct nci_uart *nu) { pr_info("NCI uart driver '%s [%d]' unregistered\n", nu->name, nu->driver); /* Remove this driver from the driver list */ nci_uart_drivers[nu->driver] = NULL; } EXPORT_SYMBOL_GPL(nci_uart_unregister); void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl) { struct ktermios new_termios; if (!nu->tty) return; down_read(&nu->tty->termios_rwsem); new_termios = nu->tty->termios; up_read(&nu->tty->termios_rwsem); tty_termios_encode_baud_rate(&new_termios, baudrate, baudrate); if (flow_ctrl) new_termios.c_cflag |= CRTSCTS; else new_termios.c_cflag &= ~CRTSCTS; tty_set_termios(nu->tty, &new_termios); } EXPORT_SYMBOL_GPL(nci_uart_set_config); static struct tty_ldisc_ops nci_uart_ldisc = { .owner = THIS_MODULE, .num = N_NCI, .name = "n_nci", .open = nci_uart_tty_open, .close = nci_uart_tty_close, .read = nci_uart_tty_read, .write = nci_uart_tty_write, .receive_buf = nci_uart_tty_receive, .write_wakeup = nci_uart_tty_wakeup, .ioctl = nci_uart_tty_ioctl, .compat_ioctl = nci_uart_tty_ioctl, }; static int __init nci_uart_init(void) { return tty_register_ldisc(&nci_uart_ldisc); } static void __exit nci_uart_exit(void) { tty_unregister_ldisc(&nci_uart_ldisc); } module_init(nci_uart_init); module_exit(nci_uart_exit); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("NFC NCI UART driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_NCI);
linux-master
net/nfc/nci/uart.c
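The default receive handler above (nci_uart_default_recv_buf) assumes the UART stream carries raw NCI packets with no extra framing: it collects the 3-octet NCI header, takes the payload length from the third octet, and delivers the packet once header plus payload bytes have accumulated. Below is a self-contained sketch of that state machine under those assumptions; struct framer, framer_feed() and the sample byte values are made up for illustration and are not part of the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_SIZE	3	/* NCI control/data header is 3 octets */
#define MAX_PACKET	258	/* 3-octet header + up to 255 payload octets */

struct framer {
	uint8_t buf[MAX_PACKET];
	int len;		/* bytes collected so far */
	int packet_len;		/* -1 until the full header has been seen */
};

/* Feed an arbitrary chunk of the byte stream; call deliver() for each
 * complete packet, like the while loop in nci_uart_default_recv_buf().
 */
static void framer_feed(struct framer *f, const uint8_t *data, int count,
			void (*deliver)(const uint8_t *pkt, int len))
{
	while (count > 0) {
		int want, chunk;

		/* Eat byte after byte until the header is complete */
		if (f->len < HDR_SIZE) {
			f->buf[f->len++] = *data++;
			count--;
			if (f->len == HDR_SIZE)
				f->packet_len = HDR_SIZE + f->buf[2];
			continue;
		}

		/* Consume as much of the remaining payload as is available */
		want = f->packet_len - f->len;
		chunk = count < want ? count : want;
		memcpy(f->buf + f->len, data, chunk);
		f->len += chunk;
		data += chunk;
		count -= chunk;

		if (f->len == f->packet_len) {
			deliver(f->buf, f->len);
			f->len = 0;
			f->packet_len = -1;
		}
	}
}

static void print_packet(const uint8_t *pkt, int len)
{
	printf("packet complete, %d bytes (plen %u)\n", len, pkt[2]);
}

int main(void)
{
	struct framer f = { .len = 0, .packet_len = -1 };
	/* one full packet plus the header of a second one, split across reads */
	const uint8_t first[] = { 0x20, 0x00, 0x02, 0xaa, 0xbb, 0x20, 0x01, 0x01 };
	const uint8_t rest[] = { 0xcc };

	framer_feed(&f, first, sizeof(first), print_packet);
	framer_feed(&f, rest, sizeof(rest), print_packet);
	return 0;
}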
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Intel Corporation
 */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "aosp.h"

/* Command complete parameters of LE_Get_Vendor_Capabilities_Command
 * The parameters grow over time. The base version that declares the
 * version_supported field is v0.95. Refer to
 * https://cs.android.com/android/platform/superproject/+/master:system/
 * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler
 */
struct aosp_rp_le_get_vendor_capa {
	/* v0.95: 15 octets */
	__u8	status;
	__u8	max_advt_instances;
	__u8	offloaded_resolution_of_private_address;
	__le16	total_scan_results_storage;
	__u8	max_irk_list_sz;
	__u8	filtering_support;
	__u8	max_filter;
	__u8	activity_energy_info_support;
	__le16	version_supported;
	__le16	total_num_of_advt_tracked;
	__u8	extended_scan_support;
	__u8	debug_logging_supported;
	/* v0.96: 16 octets */
	__u8	le_address_generation_offloading_support;
	/* v0.98: 21 octets */
	__le32	a2dp_source_offload_capability_mask;
	__u8	bluetooth_quality_report_support;
	/* v1.00: 25 octets */
	__le32	dynamic_audio_buffer_support;
} __packed;

#define VENDOR_CAPA_BASE_SIZE	15
#define VENDOR_CAPA_0_98_SIZE	21

void aosp_do_open(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct aosp_rp_le_get_vendor_capa *rp;
	u16 version_supported;

	if (!hdev->aosp_capable)
		return;

	bt_dev_dbg(hdev, "Initialize AOSP extension");

	/* LE Get Vendor Capabilities Command */
	skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR_OR_NULL(skb)) {
		if (!skb)
			skb = ERR_PTR(-EIO);

		bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)",
			   PTR_ERR(skb));
		return;
	}

	/* A basic length check */
	if (skb->len < VENDOR_CAPA_BASE_SIZE)
		goto length_error;

	rp = (struct aosp_rp_le_get_vendor_capa *)skb->data;

	version_supported = le16_to_cpu(rp->version_supported);
	/* AOSP displays the version number like v0.98, v1.00, etc. */
	bt_dev_info(hdev, "AOSP extensions version v%u.%02u",
		    version_supported >> 8, version_supported & 0xff);

	/* Do not support very old versions. */
	if (version_supported < 95) {
		bt_dev_warn(hdev, "AOSP capabilities version %u too old",
			    version_supported);
		goto done;
	}

	if (version_supported < 98) {
		bt_dev_warn(hdev, "AOSP quality report is not supported");
		goto done;
	}

	if (skb->len < VENDOR_CAPA_0_98_SIZE)
		goto length_error;

	/* The bluetooth_quality_report_support is defined at version
	 * v0.98. Refer to
	 * https://cs.android.com/android/platform/superproject/+/
	 * master:system/bt/gd/hci/controller.cc;l=477
	 */
	if (rp->bluetooth_quality_report_support) {
		hdev->aosp_quality_report = true;
		bt_dev_info(hdev, "AOSP quality report is supported");
	}

	goto done;

length_error:
	bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len);

done:
	kfree_skb(skb);
}

void aosp_do_close(struct hci_dev *hdev)
{
	if (!hdev->aosp_capable)
		return;

	bt_dev_dbg(hdev, "Cleanup of AOSP extension");
}

/* BQR command */
#define BQR_OPCODE		hci_opcode_pack(0x3f, 0x015e)

/* BQR report action */
#define REPORT_ACTION_ADD	0x00
#define REPORT_ACTION_DELETE	0x01
#define REPORT_ACTION_CLEAR	0x02

/* BQR event masks */
#define QUALITY_MONITORING	BIT(0)
#define APPRAOCHING_LSTO	BIT(1)
#define A2DP_AUDIO_CHOPPY	BIT(2)
#define SCO_VOICE_CHOPPY	BIT(3)

#define DEFAULT_BQR_EVENT_MASK	(QUALITY_MONITORING | APPRAOCHING_LSTO | \
				 A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY)

/* Reporting at milliseconds so as not to stress the controller too much.
 * Range: 0 ~ 65535 ms
 */
#define DEFALUT_REPORT_INTERVAL_MS	5000

struct aosp_bqr_cp {
	__u8	report_action;
	__u32	event_mask;
	__u16	min_report_interval;
} __packed;

static int enable_quality_report(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct aosp_bqr_cp cp;

	cp.report_action = REPORT_ACTION_ADD;
	cp.event_mask = DEFAULT_BQR_EVENT_MASK;
	cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS;

	skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR_OR_NULL(skb)) {
		if (!skb)
			skb = ERR_PTR(-EIO);

		bt_dev_err(hdev, "Enabling Android BQR failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	kfree_skb(skb);
	return 0;
}

static int disable_quality_report(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct aosp_bqr_cp cp = { 0 };

	cp.report_action = REPORT_ACTION_CLEAR;

	skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR_OR_NULL(skb)) {
		if (!skb)
			skb = ERR_PTR(-EIO);

		bt_dev_err(hdev, "Disabling Android BQR failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	kfree_skb(skb);
	return 0;
}

bool aosp_has_quality_report(struct hci_dev *hdev)
{
	return hdev->aosp_quality_report;
}

int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
{
	if (!aosp_has_quality_report(hdev))
		return -EOPNOTSUPP;

	bt_dev_dbg(hdev, "quality report enable %d", enable);

	/* Enable or disable the quality report feature. */
	if (enable)
		return enable_quality_report(hdev);
	else
		return disable_quality_report(hdev);
}
linux-master
net/bluetooth/aosp.c
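In aosp_do_open() above, the 16-bit version_supported field carries the major number in its high byte and the minor number in its low byte, which is why v0.95 and v0.98 compare as plain integers 95 and 98 while v1.00 becomes 256. A small hypothetical decoder, mirroring the bt_dev_info() format string and written for illustration only, not part of the kernel sources:

#include <stdint.h>
#include <stdio.h>

/* Print an AOSP capability version the way aosp_do_open() formats it:
 * major number in the high byte, minor number in the low byte.
 */
static void print_aosp_version(uint16_t version_supported)
{
	printf("AOSP extensions version v%u.%02u\n",
	       version_supported >> 8, version_supported & 0xff);
}

int main(void)
{
	print_aosp_version(98);		/* v0.98: quality report field defined */
	print_aosp_version(256);	/* v1.00: adds dynamic audio buffer support */
	return 0;
}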
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2010 Nokia Corporation Copyright (C) 2011-2012 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI Management interface */ #include <linux/module.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_sock.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" #include "msft.h" #include "eir.h" #include "aosp.h" #define MGMT_VERSION 1 #define MGMT_REVISION 22 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, MGMT_OP_READ_INFO, MGMT_OP_SET_POWERED, MGMT_OP_SET_DISCOVERABLE, MGMT_OP_SET_CONNECTABLE, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_OP_SET_BONDABLE, MGMT_OP_SET_LINK_SECURITY, MGMT_OP_SET_SSP, MGMT_OP_SET_HS, MGMT_OP_SET_LE, MGMT_OP_SET_DEV_CLASS, MGMT_OP_SET_LOCAL_NAME, MGMT_OP_ADD_UUID, MGMT_OP_REMOVE_UUID, MGMT_OP_LOAD_LINK_KEYS, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_OP_DISCONNECT, MGMT_OP_GET_CONNECTIONS, MGMT_OP_PIN_CODE_REPLY, MGMT_OP_PIN_CODE_NEG_REPLY, MGMT_OP_SET_IO_CAPABILITY, MGMT_OP_PAIR_DEVICE, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_OP_UNPAIR_DEVICE, MGMT_OP_USER_CONFIRM_REPLY, MGMT_OP_USER_CONFIRM_NEG_REPLY, MGMT_OP_USER_PASSKEY_REPLY, MGMT_OP_USER_PASSKEY_NEG_REPLY, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_OP_REMOVE_REMOTE_OOB_DATA, MGMT_OP_START_DISCOVERY, MGMT_OP_STOP_DISCOVERY, MGMT_OP_CONFIRM_NAME, MGMT_OP_BLOCK_DEVICE, MGMT_OP_UNBLOCK_DEVICE, MGMT_OP_SET_DEVICE_ID, MGMT_OP_SET_ADVERTISING, MGMT_OP_SET_BREDR, MGMT_OP_SET_STATIC_ADDRESS, MGMT_OP_SET_SCAN_PARAMS, MGMT_OP_SET_SECURE_CONN, MGMT_OP_SET_DEBUG_KEYS, MGMT_OP_SET_PRIVACY, MGMT_OP_LOAD_IRKS, MGMT_OP_GET_CONN_INFO, MGMT_OP_GET_CLOCK_INFO, MGMT_OP_ADD_DEVICE, MGMT_OP_REMOVE_DEVICE, MGMT_OP_LOAD_CONN_PARAM, MGMT_OP_READ_UNCONF_INDEX_LIST, MGMT_OP_READ_CONFIG_INFO, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, MGMT_OP_READ_EXT_INDEX_LIST, MGMT_OP_READ_ADV_FEATURES, MGMT_OP_ADD_ADVERTISING, MGMT_OP_REMOVE_ADVERTISING, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_OP_START_LIMITED_DISCOVERY, MGMT_OP_READ_EXT_INFO, MGMT_OP_SET_APPEARANCE, MGMT_OP_GET_PHY_CONFIGURATION, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_OP_SET_BLOCKED_KEYS, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_OP_READ_CONTROLLER_CAP, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_SET_EXP_FEATURE, MGMT_OP_READ_DEF_SYSTEM_CONFIG, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, MGMT_OP_SET_DEF_RUNTIME_CONFIG, MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, 
MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, MGMT_OP_REMOVE_ADV_MONITOR, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, MGMT_OP_SET_MESH_RECEIVER, MGMT_OP_MESH_READ_FEATURES, MGMT_OP_MESH_SEND, MGMT_OP_MESH_SEND_CANCEL, }; static const u16 mgmt_events[] = { MGMT_EV_CONTROLLER_ERROR, MGMT_EV_INDEX_ADDED, MGMT_EV_INDEX_REMOVED, MGMT_EV_NEW_SETTINGS, MGMT_EV_CLASS_OF_DEV_CHANGED, MGMT_EV_LOCAL_NAME_CHANGED, MGMT_EV_NEW_LINK_KEY, MGMT_EV_NEW_LONG_TERM_KEY, MGMT_EV_DEVICE_CONNECTED, MGMT_EV_DEVICE_DISCONNECTED, MGMT_EV_CONNECT_FAILED, MGMT_EV_PIN_CODE_REQUEST, MGMT_EV_USER_CONFIRM_REQUEST, MGMT_EV_USER_PASSKEY_REQUEST, MGMT_EV_AUTH_FAILED, MGMT_EV_DEVICE_FOUND, MGMT_EV_DISCOVERING, MGMT_EV_DEVICE_BLOCKED, MGMT_EV_DEVICE_UNBLOCKED, MGMT_EV_DEVICE_UNPAIRED, MGMT_EV_PASSKEY_NOTIFY, MGMT_EV_NEW_IRK, MGMT_EV_NEW_CSRK, MGMT_EV_DEVICE_ADDED, MGMT_EV_DEVICE_REMOVED, MGMT_EV_NEW_CONN_PARAM, MGMT_EV_UNCONF_INDEX_ADDED, MGMT_EV_UNCONF_INDEX_REMOVED, MGMT_EV_NEW_CONFIG_OPTIONS, MGMT_EV_EXT_INDEX_ADDED, MGMT_EV_EXT_INDEX_REMOVED, MGMT_EV_LOCAL_OOB_DATA_UPDATED, MGMT_EV_ADVERTISING_ADDED, MGMT_EV_ADVERTISING_REMOVED, MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_PHY_CONFIGURATION_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, MGMT_EV_DEVICE_FLAGS_CHANGED, MGMT_EV_ADV_MONITOR_ADDED, MGMT_EV_ADV_MONITOR_REMOVED, MGMT_EV_CONTROLLER_SUSPEND, MGMT_EV_CONTROLLER_RESUME, MGMT_EV_ADV_MONITOR_DEVICE_FOUND, MGMT_EV_ADV_MONITOR_DEVICE_LOST, }; static const u16 mgmt_untrusted_commands[] = { MGMT_OP_READ_INDEX_LIST, MGMT_OP_READ_INFO, MGMT_OP_READ_UNCONF_INDEX_LIST, MGMT_OP_READ_CONFIG_INFO, MGMT_OP_READ_EXT_INDEX_LIST, MGMT_OP_READ_EXT_INFO, MGMT_OP_READ_CONTROLLER_CAP, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_READ_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, }; static const u16 mgmt_untrusted_events[] = { MGMT_EV_INDEX_ADDED, MGMT_EV_INDEX_REMOVED, MGMT_EV_NEW_SETTINGS, MGMT_EV_CLASS_OF_DEV_CHANGED, MGMT_EV_LOCAL_NAME_CHANGED, MGMT_EV_UNCONF_INDEX_ADDED, MGMT_EV_UNCONF_INDEX_REMOVED, MGMT_EV_NEW_CONFIG_OPTIONS, MGMT_EV_EXT_INDEX_ADDED, MGMT_EV_EXT_INDEX_REMOVED, MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, }; #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" /* HCI to MGMT error code conversion table */ static const u8 mgmt_status_table[] = { MGMT_STATUS_SUCCESS, MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */ MGMT_STATUS_NOT_CONNECTED, /* No Connection */ MGMT_STATUS_FAILED, /* Hardware Failure */ MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */ MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */ MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */ MGMT_STATUS_NO_RESOURCES, /* Memory Full */ MGMT_STATUS_TIMEOUT, /* Connection Timeout */ MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */ MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */ MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */ MGMT_STATUS_BUSY, /* Command Disallowed */ MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */ MGMT_STATUS_REJECTED, /* Rejected Security */ MGMT_STATUS_REJECTED, /* Rejected Personal */ MGMT_STATUS_TIMEOUT, /* Host Timeout */ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */ MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */ MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */ MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */ MGMT_STATUS_DISCONNECTED, /* OE Power Off */ MGMT_STATUS_DISCONNECTED, /* Connection Terminated */ MGMT_STATUS_BUSY, /* 
Repeated Attempts */ MGMT_STATUS_REJECTED, /* Pairing Not Allowed */ MGMT_STATUS_FAILED, /* Unknown LMP PDU */ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */ MGMT_STATUS_REJECTED, /* SCO Offset Rejected */ MGMT_STATUS_REJECTED, /* SCO Interval Rejected */ MGMT_STATUS_REJECTED, /* Air Mode Rejected */ MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */ MGMT_STATUS_FAILED, /* Unspecified Error */ MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */ MGMT_STATUS_FAILED, /* Role Change Not Allowed */ MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */ MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */ MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */ MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */ MGMT_STATUS_FAILED, /* Unit Link Key Used */ MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */ MGMT_STATUS_TIMEOUT, /* Instant Passed */ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ MGMT_STATUS_FAILED, /* Transaction Collision */ MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ MGMT_STATUS_REJECTED, /* QoS Rejected */ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ MGMT_STATUS_REJECTED, /* Insufficient Security */ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_BUSY, /* Role Switch Pending */ MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_FAILED, /* Slot Violation */ MGMT_STATUS_FAILED, /* Role Switch Failed */ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */ MGMT_STATUS_BUSY, /* Host Busy Pairing */ MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */ MGMT_STATUS_BUSY, /* Controller Busy */ MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */ MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */ MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */ MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */ MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ }; static u8 mgmt_errno_status(int err) { switch (err) { case 0: return MGMT_STATUS_SUCCESS; case -EPERM: return MGMT_STATUS_REJECTED; case -EINVAL: return MGMT_STATUS_INVALID_PARAMS; case -EOPNOTSUPP: return MGMT_STATUS_NOT_SUPPORTED; case -EBUSY: return MGMT_STATUS_BUSY; case -ETIMEDOUT: return MGMT_STATUS_AUTH_FAILED; case -ENOMEM: return MGMT_STATUS_NO_RESOURCES; case -EISCONN: return MGMT_STATUS_ALREADY_CONNECTED; case -ENOTCONN: return MGMT_STATUS_DISCONNECTED; } return MGMT_STATUS_FAILED; } static u8 mgmt_status(int err) { if (err < 0) return mgmt_errno_status(err); if (err < ARRAY_SIZE(mgmt_status_table)) return mgmt_status_table[err]; return MGMT_STATUS_FAILED; } static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data, u16 len, int flag) { return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, flag, NULL); } static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data, u16 len, int flag, struct sock *skip_sk) { return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, flag, skip_sk); } static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, struct sock *skip_sk) { return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, HCI_SOCK_TRUSTED, skip_sk); } static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk) { return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED, skip_sk); } static u8 le_addr_type(u8 mgmt_addr_type) { if 
(mgmt_addr_type == BDADDR_LE_PUBLIC) return ADDR_LE_DEV_PUBLIC; else return ADDR_LE_DEV_RANDOM; } void mgmt_fill_version_info(void *ver) { struct mgmt_rp_read_version *rp = ver; rp->version = MGMT_VERSION; rp->revision = cpu_to_le16(MGMT_REVISION); } static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_version rp; bt_dev_dbg(hdev, "sock %p", sk); mgmt_fill_version_info(&rp); return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp, sizeof(rp)); } static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_commands *rp; u16 num_commands, num_events; size_t rp_size; int i, err; bt_dev_dbg(hdev, "sock %p", sk); if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { num_commands = ARRAY_SIZE(mgmt_commands); num_events = ARRAY_SIZE(mgmt_events); } else { num_commands = ARRAY_SIZE(mgmt_untrusted_commands); num_events = ARRAY_SIZE(mgmt_untrusted_events); } rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16)); rp = kmalloc(rp_size, GFP_KERNEL); if (!rp) return -ENOMEM; rp->num_commands = cpu_to_le16(num_commands); rp->num_events = cpu_to_le16(num_events); if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { __le16 *opcode = rp->opcodes; for (i = 0; i < num_commands; i++, opcode++) put_unaligned_le16(mgmt_commands[i], opcode); for (i = 0; i < num_events; i++, opcode++) put_unaligned_le16(mgmt_events[i], opcode); } else { __le16 *opcode = rp->opcodes; for (i = 0; i < num_commands; i++, opcode++) put_unaligned_le16(mgmt_untrusted_commands[i], opcode); for (i = 0; i < num_events; i++, opcode++) put_unaligned_le16(mgmt_untrusted_events[i], opcode); } err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp, rp_size); kfree(rp); return err; } static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_index_list *rp; struct hci_dev *d; size_t rp_len; u16 count; int err; bt_dev_dbg(hdev, "sock %p", sk); read_lock(&hci_dev_list_lock); count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (d->dev_type == HCI_PRIMARY && !hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } rp_len = sizeof(*rp) + (2 * count); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { read_unlock(&hci_dev_list_lock); return -ENOMEM; } count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (hci_dev_test_flag(d, HCI_SETUP) || hci_dev_test_flag(d, HCI_CONFIG) || hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Devices marked as raw-only are neither configured * nor unconfigured controllers. 
*/ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) continue; if (d->dev_type == HCI_PRIMARY && !hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); bt_dev_dbg(hdev, "Added hci%u", d->id); } } rp->num_controllers = cpu_to_le16(count); rp_len = sizeof(*rp) + (2 * count); read_unlock(&hci_dev_list_lock); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, rp_len); kfree(rp); return err; } static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_unconf_index_list *rp; struct hci_dev *d; size_t rp_len; u16 count; int err; bt_dev_dbg(hdev, "sock %p", sk); read_lock(&hci_dev_list_lock); count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (d->dev_type == HCI_PRIMARY && hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } rp_len = sizeof(*rp) + (2 * count); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { read_unlock(&hci_dev_list_lock); return -ENOMEM; } count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (hci_dev_test_flag(d, HCI_SETUP) || hci_dev_test_flag(d, HCI_CONFIG) || hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Devices marked as raw-only are neither configured * nor unconfigured controllers. */ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) continue; if (d->dev_type == HCI_PRIMARY && hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); bt_dev_dbg(hdev, "Added hci%u", d->id); } } rp->num_controllers = cpu_to_le16(count); rp_len = sizeof(*rp) + (2 * count); read_unlock(&hci_dev_list_lock); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len); kfree(rp); return err; } static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_ext_index_list *rp; struct hci_dev *d; u16 count; int err; bt_dev_dbg(hdev, "sock %p", sk); read_lock(&hci_dev_list_lock); count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP) count++; } rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC); if (!rp) { read_unlock(&hci_dev_list_lock); return -ENOMEM; } count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (hci_dev_test_flag(d, HCI_SETUP) || hci_dev_test_flag(d, HCI_CONFIG) || hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Devices marked as raw-only are neither configured * nor unconfigured controllers. */ if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) continue; if (d->dev_type == HCI_PRIMARY) { if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) rp->entry[count].type = 0x01; else rp->entry[count].type = 0x00; } else if (d->dev_type == HCI_AMP) { rp->entry[count].type = 0x02; } else { continue; } rp->entry[count].bus = d->bus; rp->entry[count++].index = cpu_to_le16(d->id); bt_dev_dbg(hdev, "Added hci%u", d->id); } rp->num_controllers = cpu_to_le16(count); read_unlock(&hci_dev_list_lock); /* If this command is called at least once, then all the * default index and unconfigured index events are disabled * and from now on only extended index events are used. 
*/ hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, struct_size(rp, entry, count)); kfree(rp); return err; } static bool is_configured(struct hci_dev *hdev) { if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) return false; if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && !bacmp(&hdev->public_addr, BDADDR_ANY)) return false; return true; } static __le32 get_missing_options(struct hci_dev *hdev) { u32 options = 0; if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) options |= MGMT_OPTION_EXTERNAL_CONFIG; if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && !bacmp(&hdev->public_addr, BDADDR_ANY)) options |= MGMT_OPTION_PUBLIC_ADDRESS; return cpu_to_le32(options); } static int new_options(struct hci_dev *hdev, struct sock *skip) { __le32 options = get_missing_options(hdev); return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, sizeof(options), HCI_MGMT_OPTION_EVENTS, skip); } static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) { __le32 options = get_missing_options(hdev); return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options, sizeof(options)); } static int read_config_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_config_info rp; u32 options = 0; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); rp.manufacturer = cpu_to_le16(hdev->manufacturer); if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) options |= MGMT_OPTION_EXTERNAL_CONFIG; if (hdev->set_bdaddr) options |= MGMT_OPTION_PUBLIC_ADDRESS; rp.supported_options = cpu_to_le32(options); rp.missing_options = get_missing_options(hdev); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp, sizeof(rp)); } static u32 get_supported_phys(struct hci_dev *hdev) { u32 supported_phys = 0; if (lmp_bredr_capable(hdev)) { supported_phys |= MGMT_PHY_BR_1M_1SLOT; if (hdev->features[0][0] & LMP_3SLOT) supported_phys |= MGMT_PHY_BR_1M_3SLOT; if (hdev->features[0][0] & LMP_5SLOT) supported_phys |= MGMT_PHY_BR_1M_5SLOT; if (lmp_edr_2m_capable(hdev)) { supported_phys |= MGMT_PHY_EDR_2M_1SLOT; if (lmp_edr_3slot_capable(hdev)) supported_phys |= MGMT_PHY_EDR_2M_3SLOT; if (lmp_edr_5slot_capable(hdev)) supported_phys |= MGMT_PHY_EDR_2M_5SLOT; if (lmp_edr_3m_capable(hdev)) { supported_phys |= MGMT_PHY_EDR_3M_1SLOT; if (lmp_edr_3slot_capable(hdev)) supported_phys |= MGMT_PHY_EDR_3M_3SLOT; if (lmp_edr_5slot_capable(hdev)) supported_phys |= MGMT_PHY_EDR_3M_5SLOT; } } } if (lmp_le_capable(hdev)) { supported_phys |= MGMT_PHY_LE_1M_TX; supported_phys |= MGMT_PHY_LE_1M_RX; if (hdev->le_features[1] & HCI_LE_PHY_2M) { supported_phys |= MGMT_PHY_LE_2M_TX; supported_phys |= MGMT_PHY_LE_2M_RX; } if (hdev->le_features[1] & HCI_LE_PHY_CODED) { supported_phys |= MGMT_PHY_LE_CODED_TX; supported_phys |= MGMT_PHY_LE_CODED_RX; } } return supported_phys; } static u32 get_selected_phys(struct hci_dev *hdev) { u32 selected_phys = 0; if (lmp_bredr_capable(hdev)) { selected_phys |= MGMT_PHY_BR_1M_1SLOT; if (hdev->pkt_type & (HCI_DM3 | HCI_DH3)) selected_phys |= MGMT_PHY_BR_1M_3SLOT; if (hdev->pkt_type & 
(HCI_DM5 | HCI_DH5)) selected_phys |= MGMT_PHY_BR_1M_5SLOT; if (lmp_edr_2m_capable(hdev)) { if (!(hdev->pkt_type & HCI_2DH1)) selected_phys |= MGMT_PHY_EDR_2M_1SLOT; if (lmp_edr_3slot_capable(hdev) && !(hdev->pkt_type & HCI_2DH3)) selected_phys |= MGMT_PHY_EDR_2M_3SLOT; if (lmp_edr_5slot_capable(hdev) && !(hdev->pkt_type & HCI_2DH5)) selected_phys |= MGMT_PHY_EDR_2M_5SLOT; if (lmp_edr_3m_capable(hdev)) { if (!(hdev->pkt_type & HCI_3DH1)) selected_phys |= MGMT_PHY_EDR_3M_1SLOT; if (lmp_edr_3slot_capable(hdev) && !(hdev->pkt_type & HCI_3DH3)) selected_phys |= MGMT_PHY_EDR_3M_3SLOT; if (lmp_edr_5slot_capable(hdev) && !(hdev->pkt_type & HCI_3DH5)) selected_phys |= MGMT_PHY_EDR_3M_5SLOT; } } } if (lmp_le_capable(hdev)) { if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M) selected_phys |= MGMT_PHY_LE_1M_TX; if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M) selected_phys |= MGMT_PHY_LE_1M_RX; if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M) selected_phys |= MGMT_PHY_LE_2M_TX; if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M) selected_phys |= MGMT_PHY_LE_2M_RX; if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED) selected_phys |= MGMT_PHY_LE_CODED_TX; if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED) selected_phys |= MGMT_PHY_LE_CODED_RX; } return selected_phys; } static u32 get_configurable_phys(struct hci_dev *hdev) { return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT & ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX); } static u32 get_supported_settings(struct hci_dev *hdev) { u32 settings = 0; settings |= MGMT_SETTING_POWERED; settings |= MGMT_SETTING_BONDABLE; settings |= MGMT_SETTING_DEBUG_KEYS; settings |= MGMT_SETTING_CONNECTABLE; settings |= MGMT_SETTING_DISCOVERABLE; if (lmp_bredr_capable(hdev)) { if (hdev->hci_ver >= BLUETOOTH_VER_1_2) settings |= MGMT_SETTING_FAST_CONNECTABLE; settings |= MGMT_SETTING_BREDR; settings |= MGMT_SETTING_LINK_SECURITY; if (lmp_ssp_capable(hdev)) { settings |= MGMT_SETTING_SSP; if (IS_ENABLED(CONFIG_BT_HS)) settings |= MGMT_SETTING_HS; } if (lmp_sc_capable(hdev)) settings |= MGMT_SETTING_SECURE_CONN; if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) settings |= MGMT_SETTING_WIDEBAND_SPEECH; } if (lmp_le_capable(hdev)) { settings |= MGMT_SETTING_LE; settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_STATIC_ADDRESS; settings |= MGMT_SETTING_ADVERTISING; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || hdev->set_bdaddr) settings |= MGMT_SETTING_CONFIGURATION; if (cis_central_capable(hdev)) settings |= MGMT_SETTING_CIS_CENTRAL; if (cis_peripheral_capable(hdev)) settings |= MGMT_SETTING_CIS_PERIPHERAL; settings |= MGMT_SETTING_PHY_CONFIGURATION; return settings; } static u32 get_current_settings(struct hci_dev *hdev) { u32 settings = 0; if (hdev_is_powered(hdev)) settings |= MGMT_SETTING_POWERED; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE)) settings |= MGMT_SETTING_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) settings |= MGMT_SETTING_FAST_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) settings |= MGMT_SETTING_DISCOVERABLE; if (hci_dev_test_flag(hdev, HCI_BONDABLE)) settings |= MGMT_SETTING_BONDABLE; if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) settings |= MGMT_SETTING_BREDR; if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) settings |= MGMT_SETTING_LE; if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) settings |= MGMT_SETTING_LINK_SECURITY; if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) settings |= MGMT_SETTING_SSP; if (hci_dev_test_flag(hdev, HCI_HS_ENABLED)) settings |= 
MGMT_SETTING_HS; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) settings |= MGMT_SETTING_ADVERTISING; if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) settings |= MGMT_SETTING_SECURE_CONN; if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) settings |= MGMT_SETTING_DEBUG_KEYS; if (hci_dev_test_flag(hdev, HCI_PRIVACY)) settings |= MGMT_SETTING_PRIVACY; /* The current setting for static address has two purposes. The * first is to indicate if the static address will be used and * the second is to indicate if it is actually set. * * This means if the static address is not configured, this flag * will never be set. If the address is configured, then if the * address is actually used decides if the flag is set or not. * * For single mode LE only controllers and dual-mode controllers * with BR/EDR disabled, the existence of the static address will * be evaluated. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || !bacmp(&hdev->bdaddr, BDADDR_ANY)) { if (bacmp(&hdev->static_addr, BDADDR_ANY)) settings |= MGMT_SETTING_STATIC_ADDRESS; } if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) settings |= MGMT_SETTING_WIDEBAND_SPEECH; if (cis_central_capable(hdev)) settings |= MGMT_SETTING_CIS_CENTRAL; if (cis_peripheral_capable(hdev)) settings |= MGMT_SETTING_CIS_PERIPHERAL; if (bis_capable(hdev)) settings |= MGMT_SETTING_ISO_BROADCASTER; if (sync_recv_capable(hdev)) settings |= MGMT_SETTING_ISO_SYNC_RECEIVER; return settings; } static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) { return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); } u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; /* If there's a pending mgmt command the flags will not yet have * their final values, so check for this first. */ cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); if (cmd) { struct mgmt_mode *cp = cmd->param; if (cp->val == 0x01) return LE_AD_GENERAL; else if (cp->val == 0x02) return LE_AD_LIMITED; } else { if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) return LE_AD_LIMITED; else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return LE_AD_GENERAL; } return 0; } bool mgmt_get_connectable(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; /* If there's a pending mgmt command the flag will not yet have * it's final value, so check for this first. */ cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); if (cmd) { struct mgmt_mode *cp = cmd->param; return cp->val; } return hci_dev_test_flag(hdev, HCI_CONNECTABLE); } static int service_cache_sync(struct hci_dev *hdev, void *data) { hci_update_eir_sync(hdev); hci_update_class_sync(hdev); return 0; } static void service_cache_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, service_cache.work); if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) return; hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL); } static int rpa_expired_sync(struct hci_dev *hdev, void *data) { /* The generation of a new RPA and programming it into the * controller happens in the hci_req_enable_advertising() * function. 
*/ if (ext_adv_capable(hdev)) return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); else return hci_enable_advertising_sync(hdev); } static void rpa_expired(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rpa_expired.work); bt_dev_dbg(hdev, ""); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); } static void discov_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, discov_off.work); bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); /* When discoverable timeout triggers, then just make sure * the limited discoverable flag is cleared. Even in the case * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. */ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; hci_update_discoverable(hdev); mgmt_new_settings(hdev); hci_dev_unlock(hdev); } static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev); static void mesh_send_complete(struct hci_dev *hdev, struct mgmt_mesh_tx *mesh_tx, bool silent) { u8 handle = mesh_tx->handle; if (!silent) mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle, sizeof(handle), NULL); mgmt_mesh_remove(mesh_tx); } static int mesh_send_done_sync(struct hci_dev *hdev, void *data) { struct mgmt_mesh_tx *mesh_tx; hci_dev_clear_flag(hdev, HCI_MESH_SENDING); hci_disable_advertising_sync(hdev); mesh_tx = mgmt_mesh_next(hdev, NULL); if (mesh_tx) mesh_send_complete(hdev, mesh_tx, false); return 0; } static int mesh_send_sync(struct hci_dev *hdev, void *data); static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err); static void mesh_next(struct hci_dev *hdev, void *data, int err) { struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL); if (!mesh_tx) return; err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, mesh_send_start_complete); if (err < 0) mesh_send_complete(hdev, mesh_tx, false); else hci_dev_set_flag(hdev, HCI_MESH_SENDING); } static void mesh_send_done(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, mesh_send_done.work); if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING)) return; hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next); } static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) { if (hci_dev_test_flag(hdev, HCI_MGMT)) return; BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION); INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done); /* Non-mgmt controlled devices get this bit set * implicitly so that pairing works for them, however * for mgmt we require user-space to explicitly enable * it */ hci_dev_clear_flag(hdev, HCI_BONDABLE); hci_dev_set_flag(hdev, HCI_MGMT); } static int read_controller_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_info rp; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); bacpy(&rp.bdaddr, &hdev->bdaddr); rp.version = hdev->hci_ver; rp.manufacturer = cpu_to_le16(hdev->manufacturer); rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); rp.current_settings = cpu_to_le32(get_current_settings(hdev)); memcpy(rp.dev_class, hdev->dev_class, 3); memcpy(rp.name, hdev->dev_name, 
sizeof(hdev->dev_name)); memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name)); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, sizeof(rp)); } static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) { u16 eir_len = 0; size_t name_len; if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV, hdev->dev_class, 3); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE, hdev->appearance); name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE, hdev->dev_name, name_len); name_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT, hdev->short_name, name_len); return eir_len; } static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { char buf[512]; struct mgmt_rp_read_ext_info *rp = (void *)buf; u16 eir_len; bt_dev_dbg(hdev, "sock %p", sk); memset(&buf, 0, sizeof(buf)); hci_dev_lock(hdev); bacpy(&rp->bdaddr, &hdev->bdaddr); rp->version = hdev->hci_ver; rp->manufacturer = cpu_to_le16(hdev->manufacturer); rp->supported_settings = cpu_to_le32(get_supported_settings(hdev)); rp->current_settings = cpu_to_le32(get_current_settings(hdev)); eir_len = append_eir_data_to_buf(hdev, rp->eir); rp->eir_len = cpu_to_le16(eir_len); hci_dev_unlock(hdev); /* If this command is called at least once, then the events * for class of device and local name changes are disabled * and only the new extended controller information event * is used. */ hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp, sizeof(*rp) + eir_len); } static int ext_info_changed(struct hci_dev *hdev, struct sock *skip) { char buf[512]; struct mgmt_ev_ext_info_changed *ev = (void *)buf; u16 eir_len; memset(buf, 0, sizeof(buf)); eir_len = append_eir_data_to_buf(hdev, ev->eir); ev->eir_len = cpu_to_le16(eir_len); return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev, sizeof(*ev) + eir_len, HCI_MGMT_EXT_INFO_EVENTS, skip); } static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) { __le32 settings = cpu_to_le32(get_current_settings(hdev)); return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings, sizeof(settings)); } void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_added ev; ev.instance = instance; mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk); } void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_removed ev; ev.instance = instance; mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk); } static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { hdev->adv_instance_timeout = 0; cancel_delayed_work(&hdev->adv_instance_expire); } } /* This function requires the caller holds hdev->lock */ static void restart_le_actions(struct hci_dev *hdev) { struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { /* Needed for AUTO_OFF case where might not "really" * have been powered off. 
*/ hci_pend_le_list_del_init(p); switch (p->auto_connect) { case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_add(p, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: hci_pend_le_list_add(p, &hdev->pend_le_reports); break; default: break; } } } static int new_settings(struct hci_dev *hdev, struct sock *skip) { __le32 ev = cpu_to_le32(get_current_settings(hdev)); return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); } static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp; /* Make sure cmd still outstanding. */ if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) return; cp = cmd->param; bt_dev_dbg(hdev, "err %d", err); if (!err) { if (cp->val) { hci_dev_lock(hdev); restart_le_actions(hdev); hci_update_passive_scan(hdev); hci_dev_unlock(hdev); } send_settings_rsp(cmd->sk, cmd->opcode, hdev); /* Only call new_setting for power on as power off is deferred * to hdev->power_off work which does call hci_dev_do_close. */ if (cp->val) new_settings(hdev, cmd->sk); } else { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, mgmt_status(err)); } mgmt_pending_remove(cmd); } static int set_powered_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; BT_DBG("%s", hdev->name); return hci_set_powered_sync(hdev, cp->val); } static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_POWERED, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, MGMT_STATUS_BUSY); goto failed; } if (!!cp->val == hdev_is_powered(hdev)) { err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } /* Cancel potentially blocking sync operation before power off */ if (cp->val == 0x00) { __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, mgmt_set_powered_complete); } else { /* Use hci_cmd_sync_submit since hdev might not be running */ err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd, mgmt_set_powered_complete); } if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } int mgmt_new_settings(struct hci_dev *hdev) { return new_settings(hdev, NULL); } struct cmd_lookup { struct sock *sk; struct hci_dev *hdev; u8 mgmt_status; }; static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); list_del(&cmd->list); if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } mgmt_pending_free(cmd); } static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); mgmt_pending_remove(cmd); } static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) { if (cmd->cmd_complete) { u8 *status = data; cmd->cmd_complete(cmd, *status); mgmt_pending_remove(cmd); return; } cmd_status_rsp(cmd, data); } static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { return mgmt_cmd_complete(cmd->sk, cmd->index, 
cmd->opcode, status, cmd->param, cmd->param_len); } static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, sizeof(struct mgmt_addr_info)); } static u8 mgmt_bredr_support(struct hci_dev *hdev) { if (!lmp_bredr_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; } static u8 mgmt_le_support(struct hci_dev *hdev) { if (!lmp_le_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; } static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. */ if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) return; hci_dev_lock(hdev); if (err) { u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto done; } if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); new_settings(hdev, cmd->sk); done: mgmt_pending_remove(cmd); hci_dev_unlock(hdev); } static int set_discoverable_sync(struct hci_dev *hdev, void *data) { BT_DBG("%s", hdev->name); return hci_update_discoverable_sync(hdev); } static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_discoverable *cp = data; struct mgmt_pending_cmd *cmd; u16 timeout; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_INVALID_PARAMS); timeout = __le16_to_cpu(cp->timeout); /* Disabling discoverable requires that no timeout is set, * and enabling limited discoverable requires a timeout. */ if ((cp->val == 0x00 && timeout > 0) || (cp->val == 0x02 && timeout == 0)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) && timeout > 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_NOT_POWERED); goto failed; } if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; } if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_REJECTED); goto failed; } if (hdev->advertising_paused) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; } if (!hdev_is_powered(hdev)) { bool changed = false; /* Setting limited discoverable when powered off is * not a valid operation since it requires a timeout * and so no need to check HCI_LIMITED_DISCOVERABLE. 
*/ if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) { hci_dev_change_flag(hdev, HCI_DISCOVERABLE); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } /* If the current mode is the same, then just update the timeout * value with the new value. And if only the timeout gets updated, * then no need for any HCI transactions. */ if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = timeout; if (cp->val && hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } /* Cancel any potential discoverable timeout that might be * still active and store new timeout value. The arming of * the timeout happens in the complete handler. */ cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = timeout; if (cp->val) hci_dev_set_flag(hdev, HCI_DISCOVERABLE); else hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); /* Limited discoverable mode */ if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE); else hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd, mgmt_set_discoverable_complete); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. 
*/ if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) return; hci_dev_lock(hdev); if (err) { u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); goto done; } send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); done: if (cmd) mgmt_pending_remove(cmd); hci_dev_unlock(hdev); } static int set_connectable_update_settings(struct hci_dev *hdev, struct sock *sk, u8 val) { bool changed = false; int err; if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE)) changed = true; if (val) { hci_dev_set_flag(hdev, HCI_CONNECTABLE); } else { hci_dev_clear_flag(hdev, HCI_CONNECTABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); if (err < 0) return err; if (changed) { hci_update_scan(hdev); hci_update_passive_scan(hdev); return new_settings(hdev, sk); } return 0; } static int set_connectable_sync(struct hci_dev *hdev, void *data) { BT_DBG("%s", hdev->name); return hci_update_connectable_sync(hdev); } static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = set_connectable_update_settings(hdev, sk, cp->val); goto failed; } if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_BUSY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } if (cp->val) { hci_dev_set_flag(hdev, HCI_CONNECTABLE); } else { if (hdev->discov_timeout > 0) cancel_delayed_work(&hdev->discov_off); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_CONNECTABLE); } err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd, mgmt_set_connectable_complete); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE); else changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE); err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); if (err < 0) goto unlock; if (changed) { /* In limited privacy mode the change of bondable mode * may affect the local advertising address. 
*/ hci_update_discoverable(hdev); err = new_settings(hdev, sk); } unlock: hci_dev_unlock(hdev); return err; } static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 val, status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, status); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { bool changed = false; if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { hci_dev_change_flag(hdev, HCI_LINK_SECURITY); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, MGMT_STATUS_BUSY); goto failed; } val = !!cp->val; if (test_bit(HCI_AUTH, &hdev->flags) == val) { err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } failed: hci_dev_unlock(hdev); return err; } static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; u8 enable = cp->val; bool changed; /* Make sure cmd still outstanding. */ if (cmd != pending_find(MGMT_OP_SET_SSP, hdev)) return; if (err) { u8 mgmt_err = mgmt_status(err); if (enable && hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED)) { hci_dev_clear_flag(hdev, HCI_HS_ENABLED); new_settings(hdev, NULL); } mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, &mgmt_err); return; } if (enable) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); if (!changed) changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); else hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); hci_update_eir_sync(hdev); } static int set_ssp_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; bool changed = false; int err; if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); err = hci_write_ssp_mode_sync(hdev, cp->val); if (!err && changed) hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); return err; } static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); if (!lmp_ssp_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { bool changed; if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } 
else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); if (!changed) changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); else hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } if (pending_find(MGMT_OP_SET_SSP, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_BUSY); goto failed; } if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd, set_ssp_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_remove(cmd); } failed: hci_dev_unlock(hdev); return err; } static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!IS_ENABLED(CONFIG_BT_HS)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_NOT_SUPPORTED); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); if (!lmp_ssp_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_SSP, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_BUSY); goto unlock; } if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED); } else { if (hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_REJECTED); goto unlock; } changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static void set_le_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; u8 status = mgmt_status(err); bt_dev_dbg(hdev, "err %d", err); if (status) { mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, &status); return; } mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); } static int set_le_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; u8 val = !!cp->val; int err; if (!val) { hci_clear_adv_instance_sync(hdev, NULL, 0x00, true); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_disable_advertising_sync(hdev); if (ext_adv_capable(hdev)) hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk); } else { hci_dev_set_flag(hdev, HCI_LE_ENABLED); } err = hci_write_le_host_supported_sync(hdev, val, 0); /* Make sure the controller has a good default for * advertising data. Restrict the update to when LE * has actually been enabled. During power on, the * update in powered_update_hci will take care of it. 
*/ if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (ext_adv_capable(hdev)) { int status; status = hci_setup_ext_adv_instance_sync(hdev, 0x00); if (!status) hci_update_scan_rsp_data_sync(hdev, 0x00); } else { hci_update_adv_data_sync(hdev, 0x00); hci_update_scan_rsp_data_sync(hdev, 0x00); } hci_update_passive_scan(hdev); } return err; } static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; u8 status = mgmt_status(err); struct sock *sk = cmd->sk; if (status) { mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, cmd_status_rsp, &status); return; } mgmt_pending_remove(cmd); mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); } static int set_mesh_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_set_mesh *cp = cmd->param; size_t len = cmd->param_len; memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); if (cp->enable) hci_dev_set_flag(hdev, HCI_MESH); else hci_dev_clear_flag(hdev, HCI_MESH); len -= sizeof(*cp); /* If filters don't fit, forward all adv pkts */ if (len <= sizeof(hdev->mesh_ad_types)) memcpy(hdev->mesh_ad_types, cp->ad_types, len); hci_update_passive_scan_sync(hdev); return 0; } static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_mesh *cp = data; struct mgmt_pending_cmd *cmd; int err = 0; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev) || !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, MGMT_STATUS_NOT_SUPPORTED); if (cp->enable != 0x00 && cp->enable != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd, set_mesh_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_remove(cmd); } hci_dev_unlock(hdev); return err; } static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_mesh_tx *mesh_tx = data; struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; unsigned long mesh_send_interval; u8 mgmt_err = mgmt_status(err); /* Report any errors here, but don't report completion */ if (mgmt_err) { hci_dev_clear_flag(hdev, HCI_MESH_SENDING); /* Send Complete Error Code for handle */ mesh_send_complete(hdev, mesh_tx, false); return; } mesh_send_interval = msecs_to_jiffies((send->cnt) * 25); queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done, mesh_send_interval); } static int mesh_send_sync(struct hci_dev *hdev, void *data) { struct mgmt_mesh_tx *mesh_tx = data; struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; struct adv_info *adv, *next_instance; u8 instance = hdev->le_num_of_adv_sets + 1; u16 timeout, duration; int err = 0; if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt) return MGMT_STATUS_BUSY; timeout = 1000; duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval); adv = hci_add_adv_instance(hdev, instance, 0, send->adv_data_len, send->adv_data, 0, NULL, timeout, duration, HCI_ADV_TX_POWER_NO_PREFERENCE, hdev->le_adv_min_interval, hdev->le_adv_max_interval, mesh_tx->handle); if (!IS_ERR(adv)) mesh_tx->instance = instance; else err = PTR_ERR(adv); if (hdev->cur_adv_instance == instance) { /* If the currently advertised instance is being changed then * cancel the 
current advertising and schedule the next * instance. If there is only one instance then the overridden * advertising data will be visible right away. */ cancel_adv_timeout(hdev); next_instance = hci_get_next_instance(hdev, instance); if (next_instance) instance = next_instance->instance; else instance = 0; } else if (hdev->adv_instance_timeout) { /* Immediately advertise the new instance if no other, or * let it go naturally from queue if ADV is already happening */ instance = 0; } if (instance) return hci_schedule_adv_instance_sync(hdev, instance, true); return err; } static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data) { struct mgmt_rp_mesh_read_features *rp = data; if (rp->used_handles >= rp->max_handles) return; rp->handles[rp->used_handles++] = mesh_tx->handle; } static int mesh_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_rp_mesh_read_features rp; if (!lmp_le_capable(hdev) || !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, MGMT_STATUS_NOT_SUPPORTED); memset(&rp, 0, sizeof(rp)); rp.index = cpu_to_le16(hdev->id); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) rp.max_handles = MESH_HANDLES_MAX; hci_dev_lock(hdev); if (rp.max_handles) mgmt_mesh_foreach(hdev, send_count, &rp, sk); mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp, rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX); hci_dev_unlock(hdev); return 0; } static int send_cancel(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param; struct mgmt_mesh_tx *mesh_tx; if (!cancel->handle) { do { mesh_tx = mgmt_mesh_next(hdev, cmd->sk); if (mesh_tx) mesh_send_complete(hdev, mesh_tx, false); } while (mesh_tx); } else { mesh_tx = mgmt_mesh_find(hdev, cancel->handle); if (mesh_tx && mesh_tx->sk == cmd->sk) mesh_send_complete(hdev, mesh_tx, false); } mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, 0, NULL, 0); mgmt_pending_free(cmd); return 0; } static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_pending_cmd *cmd; int err; if (!lmp_le_capable(hdev) || !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, MGMT_STATUS_REJECTED); hci_dev_lock(hdev); cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_free(cmd); } hci_dev_unlock(hdev); return err; } static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mesh_tx *mesh_tx; struct mgmt_cp_mesh_send *send = data; struct mgmt_rp_mesh_read_features rp; bool sending; int err = 0; if (!lmp_le_capable(hdev) || !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || len <= MGMT_MESH_SEND_SIZE || len > (MGMT_MESH_SEND_SIZE + 31)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, MGMT_STATUS_REJECTED); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); rp.max_handles = MESH_HANDLES_MAX; mgmt_mesh_foreach(hdev, send_count, &rp, sk); if (rp.max_handles <= 
rp.used_handles) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, MGMT_STATUS_BUSY); goto done; } sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING); mesh_tx = mgmt_mesh_add(sk, hdev, send, len); if (!mesh_tx) err = -ENOMEM; else if (!sending) err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, mesh_send_start_complete); if (err < 0) { bt_dev_err(hdev, "Send Mesh Failed %d", err); err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, MGMT_STATUS_FAILED); if (mesh_tx) { if (sending) mgmt_mesh_remove(mesh_tx); } } else { hci_dev_set_flag(hdev, HCI_MESH_SENDING); mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0, &mesh_tx->handle, 1); } done: hci_dev_unlock(hdev); return err; } static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; u8 val, enabled; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_INVALID_PARAMS); /* Bluetooth single mode LE only controllers or dual-mode * controllers configured as LE only devices, do not allow * switching LE off. These have either LE enabled explicitly * or BR/EDR has been previously switched off. * * When trying to enable an already enabled LE, then gracefully * send a positive response. Trying to disable it however will * result into rejection. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (cp->val == 0x01) return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_REJECTED); } hci_dev_lock(hdev); val = !!cp->val; enabled = lmp_host_le_capable(hdev); if (!hdev_is_powered(hdev) || val == enabled) { bool changed = false; if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { hci_dev_change_flag(hdev, HCI_LE_ENABLED); changed = true; } if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) { hci_dev_clear_flag(hdev, HCI_ADVERTISING); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); goto unlock; } if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_le_sync, cmd, set_le_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_remove(cmd); } unlock: hci_dev_unlock(hdev); return err; } /* This is a helper function to test for pending mgmt commands that can * cause CoD or EIR HCI commands. We can only allow one such pending * mgmt command at a time since otherwise we cannot easily track what * the current values are, will be, and based on that calculate if a new * HCI command needs to be sent and if yes with what value. 
*/ static bool pending_eir_or_class(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { switch (cmd->opcode) { case MGMT_OP_ADD_UUID: case MGMT_OP_REMOVE_UUID: case MGMT_OP_SET_DEV_CLASS: case MGMT_OP_SET_POWERED: return true; } } return false; } static const u8 bluetooth_base_uuid[] = { 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static u8 get_uuid_size(const u8 *uuid) { u32 val; if (memcmp(uuid, bluetooth_base_uuid, 12)) return 128; val = get_unaligned_le32(&uuid[12]); if (val > 0xffff) return 32; return 16; } static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; bt_dev_dbg(hdev, "err %d", err); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), hdev->dev_class, 3); mgmt_pending_free(cmd); } static int add_uuid_sync(struct hci_dev *hdev, void *data) { int err; err = hci_update_class_sync(hdev); if (err) return err; return hci_update_eir_sync(hdev); } static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_uuid *cp = data; struct mgmt_pending_cmd *cmd; struct bt_uuid *uuid; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, MGMT_STATUS_BUSY); goto failed; } uuid = kmalloc(sizeof(*uuid), GFP_KERNEL); if (!uuid) { err = -ENOMEM; goto failed; } memcpy(uuid->uuid, cp->uuid, 16); uuid->svc_hint = cp->svc_hint; uuid->size = get_uuid_size(cp->uuid); list_add_tail(&uuid->list, &hdev->uuids); cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete); if (err < 0) { mgmt_pending_free(cmd); goto failed; } failed: hci_dev_unlock(hdev); return err; } static bool enable_service_cache(struct hci_dev *hdev) { if (!hdev_is_powered(hdev)) return false; if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) { queue_delayed_work(hdev->workqueue, &hdev->service_cache, CACHE_TIMEOUT); return true; } return false; } static int remove_uuid_sync(struct hci_dev *hdev, void *data) { int err; err = hci_update_class_sync(hdev); if (err) return err; return hci_update_eir_sync(hdev); } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_uuid *cp = data; struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; static const u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; int err, found; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, MGMT_STATUS_BUSY); goto unlock; } if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { hci_uuids_clear(hdev); if (enable_service_cache(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, hdev->dev_class, 3); goto unlock; } goto update_class; } found = 0; list_for_each_entry_safe(match, tmp, &hdev->uuids, list) { if (memcmp(match->uuid, cp->uuid, 16) != 0) continue; list_del(&match->list); kfree(match); found++; } if (found == 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, MGMT_STATUS_INVALID_PARAMS); goto unlock; } update_class: cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd, mgmt_class_complete); if (err < 0) mgmt_pending_free(cmd); 
unlock: hci_dev_unlock(hdev); return err; } static int set_class_sync(struct hci_dev *hdev, void *data) { int err = 0; if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { cancel_delayed_work_sync(&hdev->service_cache); err = hci_update_eir_sync(hdev); } if (err) return err; return hci_update_class_sync(hdev); } static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_dev_class *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_BUSY); goto unlock; } if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_INVALID_PARAMS); goto unlock; } hdev->major_class = cp->major; hdev->minor_class = cp->minor; if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, hdev->dev_class, 3); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_cmd_sync_queue(hdev, set_class_sync, cmd, mgmt_class_complete); if (err < 0) mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_load_link_keys *cp = data; const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_link_key_info)); u16 key_count, expected_len; bool changed; int i; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "load_link_keys: too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, key_count); for (i = 0; i < key_count; i++) { struct mgmt_link_key_info *key = &cp->keys[i]; if (key->addr.type != BDADDR_BREDR || key->type > 0x08) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_link_keys_clear(hdev); if (cp->debug_keys) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else changed = hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS); if (changed) new_settings(hdev, NULL); for (i = 0; i < key_count; i++) { struct mgmt_link_key_info *key = &cp->keys[i]; if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LINKKEY, key->val)) { bt_dev_warn(hdev, "Skipping blocked link key for %pMR", &key->addr.bdaddr); continue; } /* Always ignore debug keys and require a new pairing if * the user wants to use them. 
*/ if (key->type == HCI_LK_DEBUG_COMBINATION) continue; hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val, key->type, key->pin_len, NULL); } mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); return 0; } static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, struct sock *skip_sk) { struct mgmt_ev_device_unpaired ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = addr_type; return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev), skip_sk); } static void unpair_device_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_unpair_device *cp = cmd->param; if (!err) device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, err); mgmt_pending_free(cmd); } static int unpair_device_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_unpair_device *cp = cmd->param; struct hci_conn *conn; if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (!conn) return 0; return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); } static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_unpair_device *cp = data; struct mgmt_rp_unpair_device rp; struct hci_conn_params *params; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; u8 addr_type; int err; memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); if (cp->disconnect != 0x00 && cp->disconnect != 0x01) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) { /* If disconnection is requested, then look up the * connection. If the remote device is connected, it * will be later used to terminate the link. * * Setting it to NULL explicitly will cause no * termination of the link. */ if (cp->disconnect) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = NULL; err = hci_remove_link_key(hdev, &cp->addr.bdaddr); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); goto unlock; } goto done; } /* LE address type */ addr_type = le_addr_type(cp->addr.type); /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); goto unlock; } conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type); if (!conn) { hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); goto done; } /* Defer clearing up the connection parameters until closing to * give a chance of keeping them if a repairing happens. 
*/ set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); /* Disable auto-connection parameters if present */ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (params) { if (params->explicit_connect) params->auto_connect = HCI_AUTO_CONN_EXPLICIT; else params->auto_connect = HCI_AUTO_CONN_DISABLED; } /* If disconnection is not requested, then clear the connection * variable so that the link is not terminated. */ if (!cp->disconnect) conn = NULL; done: /* If the connection variable is set, then termination of the * link is requested. */ if (!conn) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, &rp, sizeof(rp)); device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, sizeof(*cp)); if (!cmd) { err = -ENOMEM; goto unlock; } cmd->cmd_complete = addr_cmd_complete; err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd, unpair_device_complete); if (err < 0) mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto failed; } if (pending_find(MGMT_OP_DISCONNECT, hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto failed; } if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } cmd->cmd_complete = generic_cmd_complete; err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static u8 link_to_bdaddr(u8 link_type, u8 addr_type) { switch (link_type) { case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: return BDADDR_LE_PUBLIC; default: /* Fallback to LE Random address type */ return BDADDR_LE_RANDOM; } default: /* Fallback to BR/EDR type */ return BDADDR_BREDR; } } static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_get_connections *rp; struct hci_conn *c; int err; u16 i; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, MGMT_STATUS_NOT_POWERED); goto unlock; } i = 0; list_for_each_entry(c, &hdev->conn_hash.list, list) { if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) i++; } rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL); if (!rp) { err = -ENOMEM; goto unlock; } i = 0; list_for_each_entry(c, &hdev->conn_hash.list, list) { if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) continue; bacpy(&rp->addr[i].bdaddr, 
&c->dst); rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type); if (c->type == SCO_LINK || c->type == ESCO_LINK) continue; i++; } rp->conn_count = cpu_to_le16(i); /* Recalculate length in case of filtered SCO connections, etc */ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, struct_size(rp, addr, i)); kfree(rp); unlock: hci_dev_unlock(hdev); return err; } static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) { struct mgmt_pending_cmd *cmd; int err; cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; cmd->cmd_complete = addr_cmd_complete; err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->addr.bdaddr), &cp->addr.bdaddr); if (err < 0) mgmt_pending_remove(cmd); return err; } static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct hci_conn *conn; struct mgmt_cp_pin_code_reply *cp = data; struct hci_cp_pin_code_reply reply; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_NOT_POWERED); goto failed; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_NOT_CONNECTED); goto failed; } if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { struct mgmt_cp_pin_code_neg_reply ncp; memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr)); bt_dev_err(hdev, "PIN code is not 16 bytes long"); err = send_pin_code_neg_reply(sk, hdev, &ncp); if (err >= 0) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_INVALID_PARAMS); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } cmd->cmd_complete = addr_cmd_complete; bacpy(&reply.bdaddr, &cp->addr.bdaddr); reply.pin_len = cp->pin_len; memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_io_capability *cp = data; bt_dev_dbg(hdev, "sock %p", sk); if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->io_capability = cp->io_capability; bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL, 0); } static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->opcode != MGMT_OP_PAIR_DEVICE) continue; if (cmd->user_data != conn) continue; return cmd; } return NULL; } static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct mgmt_rp_pair_device rp; struct hci_conn *conn = cmd->user_data; int err; bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ conn->connect_cfm_cb = NULL; conn->security_cfm_cb = NULL; 
conn->disconn_cfm_cb = NULL; hci_conn_drop(conn); /* The device is paired so there is no need to remove * its connection parameters anymore. */ clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); hci_conn_put(conn); return err; } void mgmt_smp_complete(struct hci_conn *conn, bool complete) { u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; struct mgmt_pending_cmd *cmd; cmd = find_pairing(conn); if (cmd) { cmd->cmd_complete(cmd, status); mgmt_pending_remove(cmd); } } static void pairing_complete_cb(struct hci_conn *conn, u8 status) { struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) { struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); if (!status) return; cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_pair_device *cp = data; struct mgmt_rp_pair_device rp; struct mgmt_pending_cmd *cmd; u8 sec_level, auth_type; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_ALREADY_PAIRED, &rp, sizeof(rp)); goto unlock; } sec_level = BT_SECURITY_MEDIUM; auth_type = HCI_AT_DEDICATED_BONDING; if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, auth_type, CONN_REASON_PAIR_DEVICE); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; /* When pairing a new device, it is expected to remember * this device for future connections. Adding the connection * parameter information ahead of time allows tracking * of the peripheral preferred values and will speed up any * further connection establishment. * * If connection parameters already exist, then they * will be kept and this function does nothing. 
*/ p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, sec_level, HCI_LE_CONN_TIMEOUT, CONN_REASON_PAIR_DEVICE); } if (IS_ERR(conn)) { int status; if (PTR_ERR(conn) == -EBUSY) status = MGMT_STATUS_BUSY; else if (PTR_ERR(conn) == -EOPNOTSUPP) status = MGMT_STATUS_NOT_SUPPORTED; else if (PTR_ERR(conn) == -ECONNREFUSED) status = MGMT_STATUS_REJECTED; else status = MGMT_STATUS_CONNECT_FAILED; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); goto unlock; } if (conn->connect_cfm_cb) { hci_conn_drop(conn); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); if (!cmd) { err = -ENOMEM; hci_conn_drop(conn); goto unlock; } cmd->cmd_complete = pairing_complete; /* For LE, just connecting isn't a proof that the pairing finished */ if (cp->addr.type == BDADDR_BREDR) { conn->connect_cfm_cb = pairing_complete_cb; conn->security_cfm_cb = pairing_complete_cb; conn->disconn_cfm_cb = pairing_complete_cb; } else { conn->connect_cfm_cb = le_pairing_complete_cb; conn->security_cfm_cb = le_pairing_complete_cb; conn->disconn_cfm_cb = le_pairing_complete_cb; } conn->io_capability = cp->io_cap; cmd->user_data = hci_conn_get(conn); if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) && hci_conn_security(conn, sec_level, auth_type, true)) { cmd->cmd_complete(cmd, 0); mgmt_pending_remove(cmd); } err = 0; unlock: hci_dev_unlock(hdev); return err; } static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_addr_info *addr = data; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_NOT_POWERED); goto unlock; } cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev); if (!cmd) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS); goto unlock; } conn = cmd->user_data; if (bacmp(&addr->bdaddr, &conn->dst) != 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS); goto unlock; } cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED); mgmt_pending_remove(cmd); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, addr, sizeof(*addr)); /* Since user doesn't want to proceed with the connection, abort any * ongoing pairing and then terminate the link if it was created * because of the pair device action. 
*/ if (addr->type == BDADDR_BREDR) hci_remove_link_key(hdev, &addr->bdaddr); else smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, le_addr_type(addr->type)); if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); unlock: hci_dev_unlock(hdev); return err; } static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, struct mgmt_addr_info *addr, u16 mgmt_op, u16 hci_op, __le32 passkey) { struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_NOT_POWERED, addr, sizeof(*addr)); goto done; } if (addr->type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr); else conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr, le_addr_type(addr->type)); if (!conn) { err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_NOT_CONNECTED, addr, sizeof(*addr)); goto done; } if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { err = smp_user_confirm_reply(conn, mgmt_op, passkey); if (!err) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_SUCCESS, addr, sizeof(*addr)); else err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_FAILED, addr, sizeof(*addr)); goto done; } cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); if (!cmd) { err = -ENOMEM; goto done; } cmd->cmd_complete = addr_cmd_complete; /* Continue with pairing via HCI */ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { struct hci_cp_user_passkey_reply cp; bacpy(&cp.bdaddr, &addr->bdaddr); cp.passkey = passkey; err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); } else err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr), &addr->bdaddr); if (err < 0) mgmt_pending_remove(cmd); done: hci_dev_unlock(hdev); return err; } static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_pin_code_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_PIN_CODE_NEG_REPLY, HCI_OP_PIN_CODE_NEG_REPLY, 0); } static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_confirm_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); if (len != sizeof(*cp)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, MGMT_STATUS_INVALID_PARAMS); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_CONFIRM_REPLY, HCI_OP_USER_CONFIRM_REPLY, 0); } static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_confirm_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_CONFIRM_NEG_REPLY, HCI_OP_USER_CONFIRM_NEG_REPLY, 0); } static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_passkey_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_PASSKEY_REPLY, HCI_OP_USER_PASSKEY_REPLY, cp->passkey); } static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_passkey_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_PASSKEY_NEG_REPLY, HCI_OP_USER_PASSKEY_NEG_REPLY, 0); } static int adv_expire_sync(struct hci_dev *hdev, u32 flags) { struct adv_info *adv_instance; adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); if (!adv_instance) 
return 0; /* stop if current instance doesn't need to be changed */ if (!(adv_instance->flags & flags)) return 0; cancel_adv_timeout(hdev); adv_instance = hci_get_next_instance(hdev, adv_instance->instance); if (!adv_instance) return 0; hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true); return 0; } static int name_changed_sync(struct hci_dev *hdev, void *data) { return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME); } static void set_name_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_set_local_name *cp = cmd->param; u8 status = mgmt_status(err); bt_dev_dbg(hdev, "err %d", err); if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) return; if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, cp, sizeof(*cp)); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL); } mgmt_pending_remove(cmd); } static int set_name_sync(struct hci_dev *hdev, void *data) { if (lmp_bredr_capable(hdev)) { hci_update_name_sync(hdev); hci_update_eir_sync(hdev); } /* The name is stored in the scan response data and so * no need to update the advertising data here. */ if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance); return 0; } static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_local_name *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); /* If the old values are the same as the new ones just return a * direct command complete event. */ if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && !memcmp(hdev->short_name, cp->short_name, sizeof(hdev->short_name))) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, data, len); goto failed; } memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); if (!hdev_is_powered(hdev)) { memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, data, len); if (err < 0) goto failed; err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len, HCI_MGMT_LOCAL_NAME_EVENTS, sk); ext_info_changed(hdev, sk); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_name_sync, cmd, set_name_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_remove(cmd); goto failed; } memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); failed: hci_dev_unlock(hdev); return err; } static int appearance_changed_sync(struct hci_dev *hdev, void *data) { return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE); } static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_appearance *cp = data; u16 appearance; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE, MGMT_STATUS_NOT_SUPPORTED); appearance = le16_to_cpu(cp->appearance); hci_dev_lock(hdev); if (hdev->appearance != appearance) { hdev->appearance = appearance; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL, NULL); ext_info_changed(hdev, sk); } err = mgmt_cmd_complete(sk, hdev->id, 
MGMT_OP_SET_APPEARANCE, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_rp_get_phy_configuration rp; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); rp.supported_phys = cpu_to_le32(get_supported_phys(hdev)); rp.selected_phys = cpu_to_le32(get_selected_phys(hdev)); rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev)); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0, &rp, sizeof(rp)); } int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) { struct mgmt_ev_phy_configuration_changed ev; memset(&ev, 0, sizeof(ev)); ev.selected_phys = cpu_to_le32(get_selected_phys(hdev)); return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev, sizeof(ev), skip); } static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct sk_buff *skb = cmd->skb; u8 status = mgmt_status(err); if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) return; if (!status) { if (!skb) status = MGMT_STATUS_FAILED; else if (IS_ERR(skb)) status = mgmt_status(PTR_ERR(skb)); else status = mgmt_status(skb->data[0]); } bt_dev_dbg(hdev, "status %d", status); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, status); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); mgmt_phy_configuration_changed(hdev, cmd->sk); } if (skb && !IS_ERR(skb)) kfree_skb(skb); mgmt_pending_remove(cmd); } static int set_default_phy_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_set_phy_configuration *cp = cmd->param; struct hci_cp_le_set_default_phy cp_phy; u32 selected_phys = __le32_to_cpu(cp->selected_phys); memset(&cp_phy, 0, sizeof(cp_phy)); if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) cp_phy.all_phys |= 0x01; if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) cp_phy.all_phys |= 0x02; if (selected_phys & MGMT_PHY_LE_1M_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; if (selected_phys & MGMT_PHY_LE_2M_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; if (selected_phys & MGMT_PHY_LE_CODED_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; if (selected_phys & MGMT_PHY_LE_1M_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; if (selected_phys & MGMT_PHY_LE_2M_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; if (selected_phys & MGMT_PHY_LE_CODED_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT); return 0; } static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_phy_configuration *cp = data; struct mgmt_pending_cmd *cmd; u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; u16 pkt_type = (HCI_DH1 | HCI_DM1); bool changed = false; int err; bt_dev_dbg(hdev, "sock %p", sk); configurable_phys = get_configurable_phys(hdev); supported_phys = get_supported_phys(hdev); selected_phys = __le32_to_cpu(cp->selected_phys); if (selected_phys & ~supported_phys) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_INVALID_PARAMS); unconfigure_phys = supported_phys & ~configurable_phys; if ((selected_phys & unconfigure_phys) != unconfigure_phys) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_INVALID_PARAMS); if (selected_phys == get_selected_phys(hdev)) return mgmt_cmd_complete(sk, hdev->id, 
MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_REJECTED); goto unlock; } if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_BUSY); goto unlock; } if (selected_phys & MGMT_PHY_BR_1M_3SLOT) pkt_type |= (HCI_DH3 | HCI_DM3); else pkt_type &= ~(HCI_DH3 | HCI_DM3); if (selected_phys & MGMT_PHY_BR_1M_5SLOT) pkt_type |= (HCI_DH5 | HCI_DM5); else pkt_type &= ~(HCI_DH5 | HCI_DM5); if (selected_phys & MGMT_PHY_EDR_2M_1SLOT) pkt_type &= ~HCI_2DH1; else pkt_type |= HCI_2DH1; if (selected_phys & MGMT_PHY_EDR_2M_3SLOT) pkt_type &= ~HCI_2DH3; else pkt_type |= HCI_2DH3; if (selected_phys & MGMT_PHY_EDR_2M_5SLOT) pkt_type &= ~HCI_2DH5; else pkt_type |= HCI_2DH5; if (selected_phys & MGMT_PHY_EDR_3M_1SLOT) pkt_type &= ~HCI_3DH1; else pkt_type |= HCI_3DH1; if (selected_phys & MGMT_PHY_EDR_3M_3SLOT) pkt_type &= ~HCI_3DH3; else pkt_type |= HCI_3DH3; if (selected_phys & MGMT_PHY_EDR_3M_5SLOT) pkt_type &= ~HCI_3DH5; else pkt_type |= HCI_3DH5; if (pkt_type != hdev->pkt_type) { hdev->pkt_type = pkt_type; changed = true; } if ((selected_phys & MGMT_PHY_LE_MASK) == (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { if (changed) mgmt_phy_configuration_changed(hdev, sk); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd, set_default_phy_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_remove(cmd); } unlock: hci_dev_unlock(hdev); return err; } static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { int err = MGMT_STATUS_SUCCESS; struct mgmt_cp_set_blocked_keys *keys = data; const u16 max_key_count = ((U16_MAX - sizeof(*keys)) / sizeof(struct mgmt_blocked_key_info)); u16 key_count, expected_len; int i; bt_dev_dbg(hdev, "sock %p", sk); key_count = __le16_to_cpu(keys->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(keys, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_blocked_keys_clear(hdev); for (i = 0; i < key_count; ++i) { struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL); if (!b) { err = MGMT_STATUS_NO_RESOURCES; break; } b->type = keys->keys[i].type; memcpy(b->val, keys->keys[i].val, sizeof(b->val)); list_add_rcu(&b->list, &hdev->blocked_keys); } hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, err, NULL, 0); } static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; int err; bool changed = false; bt_dev_dbg(hdev, "sock %p", sk); if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_INVALID_PARAMS); 
hci_dev_lock(hdev); if (hdev_is_powered(hdev) && !!cp->val != hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_REJECTED); goto unlock; } if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); else changed = hci_dev_test_and_clear_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int read_controller_cap(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { char buf[20]; struct mgmt_rp_read_controller_cap *rp = (void *)buf; u16 cap_len = 0; u8 flags = 0; u8 tx_power_range[2]; bt_dev_dbg(hdev, "sock %p", sk); memset(&buf, 0, sizeof(buf)); hci_dev_lock(hdev); /* When the Read Simple Pairing Options command is supported, then * the remote public key validation is supported. * * Alternatively, when Microsoft extensions are available, they can * indicate support for public key validation as well. */ if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev)) flags |= 0x01; /* Remote public key validation (BR/EDR) */ flags |= 0x02; /* Remote public key validation (LE) */ /* When the Read Encryption Key Size command is supported, then the * encryption key size is enforced. */ if (hdev->commands[20] & 0x10) flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */ flags |= 0x08; /* Encryption key size enforcement (LE) */ cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS, &flags, 1); /* When the Read Simple Pairing Options command is supported, then * also max encryption key size information is provided. */ if (hdev->commands[41] & 0x08) cap_len = eir_append_le16(rp->cap, cap_len, MGMT_CAP_MAX_ENC_KEY_SIZE, hdev->max_enc_key_size); cap_len = eir_append_le16(rp->cap, cap_len, MGMT_CAP_SMP_MAX_ENC_KEY_SIZE, SMP_MAX_ENC_KEY_SIZE); /* Append the min/max LE tx power parameters if we were able to fetch * it from the controller */ if (hdev->commands[38] & 0x80) { memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1); memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1); cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR, tx_power_range, 2); } rp->cap_len = cpu_to_le16(cap_len); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0, rp, sizeof(*rp) + cap_len); } #ifdef CONFIG_BT_FEATURE_DEBUG /* d4992530-b9ec-469f-ab01-6c481c47da1c */ static const u8 debug_uuid[16] = { 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab, 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4, }; #endif /* 330859bc-7506-492d-9370-9a6f0614037f */ static const u8 quality_report_uuid[16] = { 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93, 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33, }; /* a6695ace-ee7f-4fb9-881a-5fac66c629af */ static const u8 offload_codecs_uuid[16] = { 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88, 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6, }; /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ static const u8 le_simultaneous_roles_uuid[16] = { 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; /* 15c0a148-c273-11ea-b3de-0242ac130004 */ static const u8 rpa_resolution_uuid[16] = { 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, }; /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */ static const u8 iso_socket_uuid[16] = { 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98, 
0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f, }; /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */ static const u8 mgmt_mesh_uuid[16] = { 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf, 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c, }; static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_exp_features_info *rp; size_t len; u16 idx = 0; u32 flags; int status; bt_dev_dbg(hdev, "sock %p", sk); /* Enough space for 7 features */ len = sizeof(*rp) + (sizeof(rp->features[0]) * 7); rp = kzalloc(len, GFP_KERNEL); if (!rp) return -ENOMEM; #ifdef CONFIG_BT_FEATURE_DEBUG if (!hdev) { flags = bt_dbg_get() ? BIT(0) : 0; memcpy(rp->features[idx].uuid, debug_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } #endif if (hdev && hci_dev_le_state_simultaneous(hdev)) { if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) flags = BIT(0); else flags = 0; memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (hdev && ll_privacy_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) flags = BIT(0) | BIT(1); else flags = BIT(1); memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (hdev && (aosp_has_quality_report(hdev) || hdev->set_quality_report)) { if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) flags = BIT(0); else flags = 0; memcpy(rp->features[idx].uuid, quality_report_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (hdev && hdev->get_data_path_id) { if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) flags = BIT(0); else flags = 0; memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (IS_ENABLED(CONFIG_BT_LE)) { flags = iso_enabled() ? BIT(0) : 0; memcpy(rp->features[idx].uuid, iso_socket_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (hdev && lmp_le_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) flags = BIT(0); else flags = 0; memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable * the events to update client on any future change. */ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_READ_EXP_FEATURES_INFO, 0, rp, sizeof(*rp) + (20 * idx)); kfree(rp); return status; } static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; memset(&ev, 0, sizeof(ev)); memcpy(ev.uuid, rpa_resolution_uuid, 16); ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); // Do we need to be atomic with the conn_flags? if (enabled && privacy_mode_capable(hdev)) hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; else hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY; return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid, bool enabled, struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; memset(&ev, 0, sizeof(ev)); memcpy(ev.uuid, uuid, 16); ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } #define EXP_FEAT(_uuid, _set_func) \ { \ .uuid = _uuid, \ .set_func = _set_func, \ } /* The zero key uuid is special. Multiple exp features are set through it. */ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; memset(rp.uuid, 0, 16); rp.flags = cpu_to_le32(0); #ifdef CONFIG_BT_FEATURE_DEBUG if (!hdev) { bool changed = bt_dbg_get(); bt_dbg_set(false); if (changed) exp_feature_changed(NULL, ZERO_KEY, false, sk); } #endif if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { bool changed; changed = hci_dev_test_and_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); if (changed) exp_feature_changed(hdev, rpa_resolution_uuid, false, sk); } hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); } #ifdef CONFIG_BT_FEATURE_DEBUG static int set_debug_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; /* Command requires to use the non-controller index */ if (hdev) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; changed = val ? !bt_dbg_get() : bt_dbg_get(); bt_dbg_set(val); memcpy(rp.uuid, debug_uuid, 16); rp.flags = cpu_to_le32(val ? BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, debug_uuid, val, sk); return err; } #endif static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; /* Command requires to use the controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; if (val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_MESH_EXPERIMENTAL); } else { hci_dev_clear_flag(hdev, HCI_MESH); changed = hci_dev_test_and_clear_flag(hdev, HCI_MESH_EXPERIMENTAL); } memcpy(rp.uuid, mgmt_mesh_uuid, 16); rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk); return err; } static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; u32 flags; /* Command requires to use the controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Changes can only be made when controller is powered down */ if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_REJECTED); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; if (val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); hci_dev_clear_flag(hdev, HCI_ADVERTISING); /* Enable LL privacy + supported settings changed */ flags = BIT(0) | BIT(1); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); /* Disable LL privacy + supported settings changed */ flags = BIT(1); } memcpy(rp.uuid, rpa_resolution_uuid, 16); rp.flags = cpu_to_le32(flags); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_ll_privacy_feature_changed(val, hdev, sk); return err; } static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; /* Command requires to use a valid controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); hci_req_sync_lock(hdev); val = !!cp->param[0]; changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); goto unlock_quality_report; } if (changed) { if (hdev->set_quality_report) err = hdev->set_quality_report(hdev, val); else err = aosp_set_quality_report(hdev, val); if (err) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_FAILED); goto unlock_quality_report; } if (val) hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); else hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); } bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed); memcpy(rp.uuid, quality_report_uuid, 16); rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, quality_report_uuid, val, sk); unlock_quality_report: hci_req_sync_unlock(hdev); return err; } static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { bool val, changed; int err; struct mgmt_rp_set_exp_feature rp; /* Command requires to use a valid controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)); if (!hdev->get_data_path_id) { return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); } if (changed) { if (val) hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); else hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); } bt_dev_info(hdev, "offload codecs enable %d changed %d", val, changed); memcpy(rp.uuid, offload_codecs_uuid, 16); rp.flags = cpu_to_le32(val ? BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, offload_codecs_uuid, val, sk); return err; } static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { bool val, changed; int err; struct mgmt_rp_set_exp_feature rp; /* Command requires to use a valid controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)); if (!hci_dev_le_state_simultaneous(hdev)) { return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); } if (changed) { if (val) hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); else hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); } bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d", val, changed); memcpy(rp.uuid, le_simultaneous_roles_uuid, 16); rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk); return err; } #ifdef CONFIG_BT_LE static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed = false; int err; /* Command requires to use the non-controller index */ if (hdev) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = cp->param[0] ? true : false; if (val) err = iso_init(); else err = iso_exit(); if (!err) changed = true; memcpy(rp.uuid, iso_socket_uuid, 16); rp.flags = cpu_to_le32(val ? BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_feature_changed(hdev, iso_socket_uuid, val, sk); return err; } #endif static const struct mgmt_exp_feature { const u8 *uuid; int (*set_func)(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len); } exp_features[] = { EXP_FEAT(ZERO_KEY, set_zero_key_func), #ifdef CONFIG_BT_FEATURE_DEBUG EXP_FEAT(debug_uuid, set_debug_func), #endif EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func), EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), EXP_FEAT(quality_report_uuid, set_quality_report_func), EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func), #ifdef CONFIG_BT_LE EXP_FEAT(iso_socket_uuid, set_iso_socket_func), #endif /* end with a null feature */ EXP_FEAT(NULL, NULL) }; static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_set_exp_feature *cp = data; size_t i = 0; bt_dev_dbg(hdev, "sock %p", sk); for (i = 0; exp_features[i].uuid; i++) { if (!memcmp(cp->uuid, exp_features[i].uuid, 16)) return exp_features[i].set_func(sk, hdev, cp, data_len); } return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); } static u32 get_params_flags(struct hci_dev *hdev, struct hci_conn_params *params) { u32 flags = hdev->conn_flags; /* Devices using RPAs can only be programmed in the acceptlist if * LL Privacy has been enable otherwise they cannot mark * HCI_CONN_FLAG_REMOTE_WAKEUP. 
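	 * In that case the flag is masked out of the reported supported flags
	 * below for devices that have an IRK (i.e. devices known to use RPAs).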
 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
	    !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}

static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* FIXME: hci_dev_lock() should arguably be taken earlier, since
	 * conn_flags can change concurrently.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);

	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
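	 * Re-running the passive scan update lets the controller apply the
	 * new privacy mode for this device.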
*/ if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) hci_update_passive_scan(hdev); unlock: hci_dev_unlock(hdev); done: if (status == MGMT_STATUS_SUCCESS) device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, &cp->addr, sizeof(cp->addr)); } static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, u16 handle) { struct mgmt_ev_adv_monitor_added ev; ev.monitor_handle = cpu_to_le16(handle); mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) { struct mgmt_ev_adv_monitor_removed ev; struct mgmt_pending_cmd *cmd; struct sock *sk_skip = NULL; struct mgmt_cp_remove_adv_monitor *cp; cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); if (cmd) { cp = cmd->param; if (cp->monitor_handle) sk_skip = cmd->sk; } ev.monitor_handle = cpu_to_le16(handle); mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); } static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct adv_monitor *monitor = NULL; struct mgmt_rp_read_adv_monitor_features *rp = NULL; int handle, err; size_t rp_size = 0; __u32 supported = 0; __u32 enabled = 0; __u16 num_handles = 0; __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; BT_DBG("request for %s", hdev->name); hci_dev_lock(hdev); if (msft_monitor_supported(hdev)) supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) handles[num_handles++] = monitor->handle; hci_dev_unlock(hdev); rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); rp = kmalloc(rp_size, GFP_KERNEL); if (!rp) return -ENOMEM; /* All supported features are currently enabled */ enabled = supported; rp->supported_features = cpu_to_le32(supported); rp->enabled_features = cpu_to_le32(enabled); rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; rp->num_handles = cpu_to_le16(num_handles); if (num_handles) memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_STATUS_SUCCESS, rp, rp_size); kfree(rp); return err; } static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, void *data, int status) { struct mgmt_rp_add_adv_patterns_monitor rp; struct mgmt_pending_cmd *cmd = data; struct adv_monitor *monitor = cmd->user_data; hci_dev_lock(hdev); rp.monitor_handle = cpu_to_le16(monitor->handle); if (!status) { mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle); hdev->adv_monitors_cnt++; if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) monitor->state = ADV_MONITOR_STATE_REGISTERED; hci_update_passive_scan(hdev); } mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); hci_dev_unlock(hdev); bt_dev_dbg(hdev, "add monitor %d complete, status %d", rp.monitor_handle, status); } static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct adv_monitor *monitor = cmd->user_data; return hci_add_adv_monitor(hdev, monitor); } static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, struct adv_monitor *m, u8 status, void *data, u16 len, u16 op) { struct mgmt_pending_cmd *cmd; int err; hci_dev_lock(hdev); if (status) goto unlock; if (pending_find(MGMT_OP_SET_LE, hdev) || 
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;

	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);

	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constricting
		 * parameters for the MSFT API to work, so it behaves as if
		 * there is no RSSI parameter to consider. May need to be
		 * changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;

		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if
(len != expected_size) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) { status = MGMT_STATUS_NO_RESOURCES; goto done; } INIT_LIST_HEAD(&m->patterns); parse_adv_monitor_rssi(m, &cp->rssi); status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); done: return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); } static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, void *data, int status) { struct mgmt_rp_remove_adv_monitor rp; struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_adv_monitor *cp = cmd->param; hci_dev_lock(hdev); rp.monitor_handle = cp->monitor_handle; if (!status) hci_update_passive_scan(hdev); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); hci_dev_unlock(hdev); bt_dev_dbg(hdev, "remove monitor %d complete, status %d", rp.monitor_handle, status); } static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_adv_monitor *cp = cmd->param; u16 handle = __le16_to_cpu(cp->monitor_handle); if (!handle) return hci_remove_all_adv_monitor(hdev); return hci_remove_single_adv_monitor(hdev, handle); } static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_pending_cmd *cmd; int err, status; hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; } err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd, mgmt_remove_adv_monitor_complete); if (err) { mgmt_pending_remove(cmd); if (err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; else status = MGMT_STATUS_FAILED; goto unlock; } hci_dev_unlock(hdev); return 0; unlock: hci_dev_unlock(hdev); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, status); } static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_rp_read_local_oob_data mgmt_rp; size_t rp_size = sizeof(mgmt_rp); struct mgmt_pending_cmd *cmd = data; struct sk_buff *skb = cmd->skb; u8 status = mgmt_status(err); if (!status) { if (!skb) status = MGMT_STATUS_FAILED; else if (IS_ERR(skb)) status = mgmt_status(PTR_ERR(skb)); else status = mgmt_status(skb->data[0]); } bt_dev_dbg(hdev, "status %d", status); if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status); goto remove; } memset(&mgmt_rp, 0, sizeof(mgmt_rp)); if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_FAILED); goto remove; } memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash)); memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand)); rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256); } else { struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_FAILED); goto remove; } memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192)); memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192)); memcpy(mgmt_rp.hash256, 
rp->hash256, sizeof(rp->hash256)); memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256)); } mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); remove: if (skb && !IS_ERR(skb)) kfree_skb(skb); mgmt_pending_free(cmd); } static int read_local_oob_data_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; if (bredr_sc_enabled(hdev)) cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk); else cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk); if (IS_ERR(cmd->skb)) return PTR_ERR(cmd->skb); else return 0; } static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_POWERED); goto unlock; } if (!lmp_ssp_capable(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_SUPPORTED); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, read_local_oob_data_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_free(cmd); } unlock: hci_dev_unlock(hdev); return err; } static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_addr_info *addr = data; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(addr->type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, addr, sizeof(*addr)); hci_dev_lock(hdev); if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) { struct mgmt_cp_add_remote_oob_data *cp = data; u8 status; if (cp->addr.type != BDADDR_BREDR) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type, cp->hash, cp->rand, NULL, NULL); if (err < 0) status = MGMT_STATUS_FAILED; else status = MGMT_STATUS_SUCCESS; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { struct mgmt_cp_add_remote_oob_ext_data *cp = data; u8 *rand192, *hash192, *rand256, *hash256; u8 status; if (bdaddr_type_is_le(cp->addr.type)) { /* Enforce zero-valued 192-bit parameters as * long as legacy SMP OOB isn't implemented. */ if (memcmp(cp->rand192, ZERO_KEY, 16) || memcmp(cp->hash192, ZERO_KEY, 16)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, addr, sizeof(*addr)); goto unlock; } rand192 = NULL; hash192 = NULL; } else { /* In case one of the P-192 values is set to zero, * then just disable OOB data for P-192. */ if (!memcmp(cp->rand192, ZERO_KEY, 16) || !memcmp(cp->hash192, ZERO_KEY, 16)) { rand192 = NULL; hash192 = NULL; } else { rand192 = cp->rand192; hash192 = cp->hash192; } } /* In case one of the P-256 values is set to zero, then just * disable OOB data for P-256. 
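	 * (rand256 and hash256 are passed as NULL below, so no P-256 OOB
	 * data gets stored for the device).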
*/ if (!memcmp(cp->rand256, ZERO_KEY, 16) || !memcmp(cp->hash256, ZERO_KEY, 16)) { rand256 = NULL; hash256 = NULL; } else { rand256 = cp->rand256; hash256 = cp->hash256; } err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type, hash192, rand192, hash256, rand256); if (err < 0) status = MGMT_STATUS_FAILED; else status = MGMT_STATUS_SUCCESS; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); } else { bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes", len); err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS); } unlock: hci_dev_unlock(hdev); return err; } static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_remote_oob_data *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->addr.type != BDADDR_BREDR) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { hci_remote_oob_data_clear(hdev); status = MGMT_STATUS_SUCCESS; goto done; } err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type); if (err < 0) status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); if (!cmd) cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); if (!cmd) cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } hci_dev_unlock(hdev); } static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, uint8_t *mgmt_status) { switch (type) { case DISCOV_TYPE_LE: *mgmt_status = mgmt_le_support(hdev); if (*mgmt_status) return false; break; case DISCOV_TYPE_INTERLEAVED: *mgmt_status = mgmt_le_support(hdev); if (*mgmt_status) return false; fallthrough; case DISCOV_TYPE_BREDR: *mgmt_status = mgmt_bredr_support(hdev); if (*mgmt_status) return false; break; default: *mgmt_status = MGMT_STATUS_INVALID_PARAMS; return false; } return true; } static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) return; bt_dev_dbg(hdev, "err %d", err); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); hci_discovery_set_state(hdev, err ? 
DISCOVERY_STOPPED: DISCOVERY_FINDING); } static int start_discovery_sync(struct hci_dev *hdev, void *data) { return hci_start_discovery_sync(hdev); } static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, u16 op, void *data, u16 len) { struct mgmt_cp_start_discovery *cp = data; struct mgmt_pending_cmd *cmd; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_NOT_POWERED, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } if (!discovery_type_is_valid(hdev, cp->type, &status)) { err = mgmt_cmd_complete(sk, hdev->id, op, status, &cp->type, sizeof(cp->type)); goto failed; } /* Can't start discovery when it is paused */ if (hdev->discovery_paused) { err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ hci_discovery_filter_clear(hdev); hdev->discovery.type = cp->type; hdev->discovery.report_invalid_rssi = false; if (op == MGMT_OP_START_LIMITED_DISCOVERY) hdev->discovery.limited = true; else hdev->discovery.limited = false; cmd = mgmt_pending_add(sk, op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, start_discovery_complete); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } hci_discovery_set_state(hdev, DISCOVERY_STARTING); failed: hci_dev_unlock(hdev); return err; } static int start_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY, data, len); } static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { return start_discovery_internal(sk, hdev, MGMT_OP_START_LIMITED_DISCOVERY, data, len); } static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_start_service_discovery *cp = data; struct mgmt_pending_cmd *cmd; const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); u16 uuid_count, expected_len; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_NOT_POWERED, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery_paused) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } uuid_count = __le16_to_cpu(cp->uuid_count); if (uuid_count > max_uuid_count) { bt_dev_err(hdev, "service_discovery: too big uuid_count value %u", uuid_count); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, &cp->type, sizeof(cp->type)); goto failed; } expected_len = sizeof(*cp) + uuid_count * 16; if (expected_len != len) { bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes", expected_len, len); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, 
&cp->type, sizeof(cp->type)); goto failed; } if (!discovery_type_is_valid(hdev, cp->type, &status)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, status, &cp->type, sizeof(cp->type)); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ hci_discovery_filter_clear(hdev); hdev->discovery.result_filtering = true; hdev->discovery.type = cp->type; hdev->discovery.rssi = cp->rssi; hdev->discovery.uuid_count = uuid_count; if (uuid_count > 0) { hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, GFP_KERNEL); if (!hdev->discovery.uuids) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_FAILED, &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } } err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, start_discovery_complete); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } hci_discovery_set_state(hdev, DISCOVERY_STARTING); failed: hci_dev_unlock(hdev); return err; } void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } hci_dev_unlock(hdev); } static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) return; bt_dev_dbg(hdev, "err %d", err); mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); if (!err) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } static int stop_discovery_sync(struct hci_dev *hdev, void *data) { return hci_stop_discovery_sync(hdev); } static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_stop_discovery *mgmt_cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, MGMT_STATUS_REJECTED, &mgmt_cp->type, sizeof(mgmt_cp->type)); goto unlock; } if (hdev->discovery.type != mgmt_cp->type) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type, sizeof(mgmt_cp->type)); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd, stop_discovery_complete); if (err < 0) { mgmt_pending_remove(cmd); goto unlock; } hci_discovery_set_state(hdev, DISCOVERY_STOPPING); unlock: hci_dev_unlock(hdev); return err; } static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_confirm_name *cp = data; struct inquiry_entry *e; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto failed; } e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); if (!e) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto failed; } if (cp->name_known) { e->name_state = NAME_KNOWN; list_del(&e->list); } else 
{ e->name_state = NAME_NEEDED; hci_inquiry_cache_update_resolve(hdev, e); } err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr, sizeof(cp->addr)); failed: hci_dev_unlock(hdev); return err; } static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_block_device *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr, cp->addr.type); if (err < 0) { status = MGMT_STATUS_FAILED; goto done; } mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr), sk); status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_unblock_device *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr, cp->addr.type); if (err < 0) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr), sk); status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } static int set_device_id_sync(struct hci_dev *hdev, void *data) { return hci_update_eir_sync(hdev); } static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_device_id *cp = data; int err; __u16 source; bt_dev_dbg(hdev, "sock %p", sk); source = __le16_to_cpu(cp->source); if (source > 0x0002) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->devid_source = source; hdev->devid_vendor = __le16_to_cpu(cp->vendor); hdev->devid_product = __le16_to_cpu(cp->product); hdev->devid_version = __le16_to_cpu(cp->version); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL); hci_dev_unlock(hdev); return err; } static void enable_advertising_instance(struct hci_dev *hdev, int err) { if (err) bt_dev_err(hdev, "failed to re-configure advertising %d", err); else bt_dev_dbg(hdev, "status %d", err); } static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) { struct cmd_lookup match = { NULL, hdev }; u8 instance; struct adv_info *adv_instance; u8 status = mgmt_status(err); if (status) { mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, cmd_status_rsp, &status); return; } if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_dev_set_flag(hdev, HCI_ADVERTISING); else hci_dev_clear_flag(hdev, HCI_ADVERTISING); mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); /* If "Set Advertising" was just disabled and instance advertising was * set up earlier, then re-enable multi-instance advertising. 
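	 * If no current advertising instance is set, the first registered
	 * instance is scheduled instead.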
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) return; instance = hdev->cur_adv_instance; if (!instance) { adv_instance = list_first_entry_or_null(&hdev->adv_instances, struct adv_info, list); if (!adv_instance) return; instance = adv_instance->instance; } err = hci_schedule_adv_instance_sync(hdev, instance, true); enable_advertising_instance(hdev, err); } static int set_adv_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; u8 val = !!cp->val; if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); cancel_adv_timeout(hdev); if (val) { /* Switch to instance "0" for the Set Advertising setting. * We cannot use update_[adv|scan_rsp]_data() here as the * HCI_ADVERTISING flag is not yet set. */ hdev->cur_adv_instance = 0x00; if (ext_adv_capable(hdev)) { hci_start_ext_adv_sync(hdev, 0x00); } else { hci_update_adv_data_sync(hdev, 0x00); hci_update_scan_rsp_data_sync(hdev, 0x00); hci_enable_advertising_sync(hdev); } } else { hci_disable_advertising_sync(hdev); } return 0; } static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 val, status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); if (hdev->advertising_paused) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_BUSY); hci_dev_lock(hdev); val = !!cp->val; /* The following conditions are ones which mean that we should * not do any HCI communication but directly send a mgmt * response to user space (after toggling the flag if * necessary). 
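	 * These are: the controller is powered off, the requested mode is
	 * already active, mesh mode is enabled, an LE connection exists, or
	 * an active LE scan is in progress.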
*/ if (!hdev_is_powered(hdev) || (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || hci_dev_test_flag(hdev, HCI_MESH) || hci_conn_num(hdev, LE_LINK) > 0 || (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE)) { bool changed; if (cp->val) { hdev->cur_adv_instance = 0x00; changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING); if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING); hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); goto unlock; } if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) || pending_find(MGMT_OP_SET_LE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd, set_advertising_complete); if (err < 0 && cmd) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int set_static_address(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_static_address *cp = data; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_NOT_SUPPORTED); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_REJECTED); if (bacmp(&cp->bdaddr, BDADDR_ANY)) { if (!bacmp(&cp->bdaddr, BDADDR_NONE)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); /* Two most significant bits shall be set */ if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); bacpy(&hdev->static_addr, &cp->bdaddr); err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev); if (err < 0) goto unlock; err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int set_scan_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_scan_params *cp = data; __u16 interval, window; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_NOT_SUPPORTED); interval = __le16_to_cpu(cp->interval); if (interval < 0x0004 || interval > 0x4000) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); window = __le16_to_cpu(cp->window); if (window < 0x0004 || window > 0x4000) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); if (window > interval) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->le_scan_interval = interval; hdev->le_scan_window = window; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); /* If background scan is running, restart it so new parameters are * loaded. 
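	 * This is only done while no discovery is in progress, as checked
	 * below.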
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->discovery.state == DISCOVERY_STOPPED) hci_update_passive_scan(hdev); hci_dev_unlock(hdev); return err; } static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; bt_dev_dbg(hdev, "err %d", err); if (err) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, mgmt_status(err)); } else { struct mgmt_mode *cp = cmd->param; if (cp->val) hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); } mgmt_pending_free(cmd); } static int write_fast_connectable_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; return hci_write_fast_connectable_sync(hdev, cp->val); } static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || hdev->hci_ver < BLUETOOTH_VER_1_2) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, sk); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd, fast_connectable_complete); if (err < 0) { mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_free(cmd); } unlock: hci_dev_unlock(hdev); return err; } static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; bt_dev_dbg(hdev, "err %d", err); if (err) { u8 mgmt_err = mgmt_status(err); /* We need to restore the flag if related HCI commands * failed. */ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); } mgmt_pending_free(cmd); } static int set_bredr_sync(struct hci_dev *hdev, void *data) { int status; status = hci_write_fast_connectable_sync(hdev, false); if (!status) status = hci_update_scan_sync(hdev); /* Since only the advertising data flags will change, there * is no need to update the scan response data. 
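	 * (the advertising data Flags field reflects BR/EDR support, which
	 * is what this setting changes).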
*/ if (!status) status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); return status; } static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { if (!cp->val) { hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); hci_dev_clear_flag(hdev, HCI_LINK_SECURITY); hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } hci_dev_change_flag(hdev, HCI_BREDR_ENABLED); err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); if (err < 0) goto unlock; err = new_settings(hdev, sk); goto unlock; } /* Reject disabling when powered on */ if (!cp->val) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); goto unlock; } else { /* When configuring a dual-mode controller to operate * with LE only and using a static address, then switching * BR/EDR back on is not allowed. * * Dual-mode controllers shall operate with the public * address as its identity address for BR/EDR and LE. So * reject the attempt to create an invalid configuration. * * The same restrictions applies when secure connections * has been enabled. For BR/EDR this is a controller feature * while for LE it is a host stack feature. This means that * switching BR/EDR back on when secure connections has been * enabled is not a supported transaction. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && (bacmp(&hdev->static_addr, BDADDR_ANY) || hci_dev_test_flag(hdev, HCI_SC_ENABLED))) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); goto unlock; } } cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd, set_bredr_complete); if (err < 0) { mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_free(cmd); goto unlock; } /* We need to flip the bit already here so that * hci_req_update_adv_data generates the correct flags. 
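	 * The flag is cleared again in set_bredr_complete() if the related
	 * HCI commands fail.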
*/ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); unlock: hci_dev_unlock(hdev); return err; } static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp; bt_dev_dbg(hdev, "err %d", err); if (err) { u8 mgmt_err = mgmt_status(err); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); goto done; } cp = cmd->param; switch (cp->val) { case 0x00: hci_dev_clear_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x01: hci_dev_set_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x02: hci_dev_set_flag(hdev, HCI_SC_ENABLED); hci_dev_set_flag(hdev, HCI_SC_ONLY); break; } send_settings_rsp(cmd->sk, cmd->opcode, hdev); new_settings(hdev, cmd->sk); done: mgmt_pending_free(cmd); } static int set_secure_conn_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_mode *cp = cmd->param; u8 val = !!cp->val; /* Force write of val */ hci_dev_set_flag(hdev, HCI_SC_ENABLED); return hci_write_sc_support_sync(hdev, val); } static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 val; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_sc_capable(hdev) && !hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_NOT_SUPPORTED); if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && lmp_sc_capable(hdev) && !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) || !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { bool changed; if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SC_ENABLED); if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_SC_ONLY); else hci_dev_clear_flag(hdev, HCI_SC_ONLY); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); } err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } val = !!cp->val; if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) { err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); goto failed; } cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd, set_secure_conn_complete); if (err < 0) { mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_FAILED); if (cmd) mgmt_pending_free(cmd); } failed: hci_dev_unlock(hdev); return err; } static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed, use_changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else changed = hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS); if (cp->val == 0x02) use_changed = !hci_dev_test_and_set_flag(hdev, HCI_USE_DEBUG_KEYS); else use_changed = 
hci_dev_test_and_clear_flag(hdev, HCI_USE_DEBUG_KEYS); if (hdev_is_powered(hdev) && use_changed && hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { u8 mode = (cp->val == 0x02) ? 0x01 : 0x00; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode); } err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_set_privacy *cp = cp_data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_NOT_SUPPORTED); if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_INVALID_PARAMS); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_REJECTED); hci_dev_lock(hdev); /* If user space supports this command it is also expected to * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. */ hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); if (cp->privacy) { changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); if (cp->privacy == 0x02) hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY); else hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); memset(hdev->irk, 0, sizeof(hdev->irk)); hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, false); hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); } err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static bool irk_is_valid(struct mgmt_irk_info *irk) { switch (irk->addr.type) { case BDADDR_LE_PUBLIC: return true; case BDADDR_LE_RANDOM: /* Two most significant bits shall be set */ if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0) return false; return true; } return false; } static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_load_irks *cp = cp_data; const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_irk_info)); u16 irk_count, expected_len; int i, err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_NOT_SUPPORTED); irk_count = __le16_to_cpu(cp->irk_count); if (irk_count > max_irk_count) { bt_dev_err(hdev, "load_irks: too big irk_count value %u", irk_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, irks, irk_count); if (expected_len != len) { bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "irk_count %u", irk_count); for (i = 0; i < irk_count; i++) { struct mgmt_irk_info *key = &cp->irks[i]; if (!irk_is_valid(key)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_smp_irks_clear(hdev); for (i = 0; i < irk_count; i++) { struct mgmt_irk_info *irk = &cp->irks[i]; if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk->val)) { bt_dev_warn(hdev, "Skipping blocked IRK for %pMR", 
&irk->addr.bdaddr); continue; } hci_add_irk(hdev, &irk->addr.bdaddr, le_addr_type(irk->addr.type), irk->val, BDADDR_ANY); } hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static bool ltk_is_valid(struct mgmt_ltk_info *key) { if (key->initiator != 0x00 && key->initiator != 0x01) return false; switch (key->addr.type) { case BDADDR_LE_PUBLIC: return true; case BDADDR_LE_RANDOM: /* Two most significant bits shall be set */ if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0) return false; return true; } return false; } static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_load_long_term_keys *cp = cp_data; const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_ltk_info)); u16 key_count, expected_len; int i, err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "load_ltks: too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "key_count %u", key_count); for (i = 0; i < key_count; i++) { struct mgmt_ltk_info *key = &cp->keys[i]; if (!ltk_is_valid(key)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_smp_ltks_clear(hdev); for (i = 0; i < key_count; i++) { struct mgmt_ltk_info *key = &cp->keys[i]; u8 type, authenticated; if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, key->val)) { bt_dev_warn(hdev, "Skipping blocked LTK for %pMR", &key->addr.bdaddr); continue; } switch (key->type) { case MGMT_LTK_UNAUTHENTICATED: authenticated = 0x00; type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER; break; case MGMT_LTK_AUTHENTICATED: authenticated = 0x01; type = key->initiator ? 
SMP_LTK : SMP_LTK_RESPONDER; break; case MGMT_LTK_P256_UNAUTH: authenticated = 0x00; type = SMP_LTK_P256; break; case MGMT_LTK_P256_AUTH: authenticated = 0x01; type = SMP_LTK_P256; break; case MGMT_LTK_P256_DEBUG: authenticated = 0x00; type = SMP_LTK_P256_DEBUG; fallthrough; default: continue; } hci_add_ltk(hdev, &key->addr.bdaddr, le_addr_type(key->addr.type), type, authenticated, key->val, key->enc_size, key->ediv, key->rand); } err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct hci_conn *conn = cmd->user_data; struct mgmt_cp_get_conn_info *cp = cmd->param; struct mgmt_rp_get_conn_info rp; u8 status; bt_dev_dbg(hdev, "err %d", err); memcpy(&rp.addr, &cp->addr, sizeof(rp.addr)); status = mgmt_status(err); if (status == MGMT_STATUS_SUCCESS) { rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; rp.max_tx_power = conn->max_tx_power; } else { rp.rssi = HCI_RSSI_INVALID; rp.tx_power = HCI_TX_POWER_INVALID; rp.max_tx_power = HCI_TX_POWER_INVALID; } mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); } static int get_conn_info_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_get_conn_info *cp = cmd->param; struct hci_conn *conn; int err; __le16 handle; /* Make sure we are still connected */ if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) return MGMT_STATUS_NOT_CONNECTED; cmd->user_data = conn; handle = cpu_to_le16(conn->handle); /* Refresh RSSI each time */ err = hci_read_rssi_sync(hdev, handle); /* For LE links TX power does not change thus we don't need to * query for it once value is known. */ if (!err && (!bdaddr_type_is_le(cp->addr.type) || conn->tx_power == HCI_TX_POWER_INVALID)) err = hci_read_tx_power_sync(hdev, handle, 0x00); /* Max TX power needs to be read only once per connection */ if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID) err = hci_read_tx_power_sync(hdev, handle, 0x01); return err; } static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_get_conn_info *cp = data; struct mgmt_rp_get_conn_info rp; struct hci_conn *conn; unsigned long conn_info_age; int err = 0; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto unlock; } /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. 
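	 * The cached values are refreshed below only when they are older
	 * than this randomized age or have never been read.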
*/ conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age, hdev->conn_info_max_age - 1); /* Query controller to refresh cached values if they are too old or were * never read. */ if (time_after(jiffies, conn->conn_info_timestamp + msecs_to_jiffies(conn_info_age)) || !conn->conn_info_timestamp) { struct mgmt_pending_cmd *cmd; cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, len); if (!cmd) { err = -ENOMEM; } else { err = hci_cmd_sync_queue(hdev, get_conn_info_sync, cmd, get_conn_info_complete); } if (err < 0) { mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_FAILED, &rp, sizeof(rp)); if (cmd) mgmt_pending_free(cmd); goto unlock; } conn->conn_info_timestamp = jiffies; } else { /* Cache is valid, just reply with values cached in hci_conn */ rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; rp.max_tx_power = conn->max_tx_power; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } unlock: hci_dev_unlock(hdev); return err; } static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_get_clock_info *cp = cmd->param; struct mgmt_rp_get_clock_info rp; struct hci_conn *conn = cmd->user_data; u8 status = mgmt_status(err); bt_dev_dbg(hdev, "err %d", err); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (err) goto complete; rp.local_clock = cpu_to_le32(hdev->clock); if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); } complete: mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp)); mgmt_pending_free(cmd); } static int get_clock_info_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_get_clock_info *cp = cmd->param; struct hci_cp_read_clock hci_cp; struct hci_conn *conn; memset(&hci_cp, 0, sizeof(hci_cp)); hci_read_clock_sync(hdev, &hci_cp); /* Make sure connection still exists */ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) return MGMT_STATUS_NOT_CONNECTED; cmd->user_data = conn; hci_cp.handle = cpu_to_le16(conn->handle); hci_cp.which = 0x01; /* Piconet clock */ return hci_read_clock_sync(hdev, &hci_cp); } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_get_clock_info *cp = data; struct mgmt_rp_get_clock_info rp; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (cp->addr.type != BDADDR_BREDR) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto unlock; } } else { conn = NULL; } cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); if (!cmd) err = -ENOMEM; else err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd, get_clock_info_complete); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, 
MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_FAILED, &rp, sizeof(rp)); if (cmd) mgmt_pending_free(cmd); } unlock: hci_dev_unlock(hdev); return err; } static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); if (!conn) return false; if (conn->dst_type != type) return false; if (conn->state != BT_CONNECTED) return false; return true; } /* This function requires the caller holds hdev->lock */ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, u8 auto_connect) { struct hci_conn_params *params; params = hci_conn_params_add(hdev, addr, addr_type); if (!params) return -EIO; if (params->auto_connect == auto_connect) return 0; hci_pend_le_list_del_init(params); switch (auto_connect) { case HCI_AUTO_CONN_DISABLED: case HCI_AUTO_CONN_LINK_LOSS: /* If auto connect is being disabled when we're trying to * connect to device, keep connecting. */ if (params->explicit_connect) hci_pend_le_list_add(params, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: if (params->explicit_connect) hci_pend_le_list_add(params, &hdev->pend_le_conns); else hci_pend_le_list_add(params, &hdev->pend_le_reports); break; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: if (!is_connected(hdev, addr, addr_type)) hci_pend_le_list_add(params, &hdev->pend_le_conns); break; } params->auto_connect = auto_connect; bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u", addr, addr_type, auto_connect); return 0; } static void device_added(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, u8 action) { struct mgmt_ev_device_added ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = type; ev.action = action; mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); } static int add_device_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); } static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; struct hci_conn_params *params; int err; u32 current_flags = 0; u32 supported_flags; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type) || !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); if (cp->addr.type == BDADDR_BREDR) { /* Only incoming connections action is supported for now */ if (cp->action != 0x01) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } err = hci_bdaddr_list_add_with_flags(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type, 0); if (err) goto unlock; hci_update_scan(hdev); goto added; } addr_type = le_addr_type(cp->addr.type); if (cp->action == 0x02) auto_conn = HCI_AUTO_CONN_ALWAYS; else if (cp->action == 0x01) auto_conn = HCI_AUTO_CONN_DIRECT; else auto_conn = HCI_AUTO_CONN_REPORT; /* Kernel internally uses conn_params with resolvable private * address, but Add Device allows only identity addresses. * Make sure it is enforced before calling * hci_conn_params_lookup. 
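	 * Anything other than an identity address is rejected below with
	 * MGMT_STATUS_INVALID_PARAMS.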
*/ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } /* If the connection parameters don't exist for this device, * they will be created and configured with defaults. */ if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, auto_conn) < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (params) current_flags = params->flags; } err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); if (err < 0) goto unlock; added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); supported_flags = hdev->conn_flags; device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); unlock: hci_dev_unlock(hdev); return err; } static void device_removed(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { struct mgmt_ev_device_removed ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = type; mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); } static int remove_device_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); } static int remove_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_device *cp = data; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { struct hci_conn_params *params; u8 addr_type; if (!bdaddr_type_is_valid(cp->addr.type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) { err = hci_bdaddr_list_del(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type); if (err) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } hci_update_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); goto complete; } addr_type = le_addr_type(cp->addr.type); /* Kernel internally uses conn_params with resolvable private * address, but Remove Device allows only identity addresses. * Make sure it is enforced before calling * hci_conn_params_lookup. 
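	 * As in Add Device, anything other than an identity address is
	 * rejected with MGMT_STATUS_INVALID_PARAMS.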
*/ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (!params) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } if (params->auto_connect == HCI_AUTO_CONN_DISABLED || params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } hci_conn_params_free(params); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); } else { struct hci_conn_params *p, *tmp; struct bdaddr_list *b, *btmp; if (cp->addr.type) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) { device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type); list_del(&b->list); kfree(b); } hci_update_scan(hdev); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) continue; device_removed(sk, hdev, &p->addr, p->addr_type); if (p->explicit_connect) { p->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } hci_conn_params_free(p); } bt_dev_dbg(hdev, "All LE connection parameters were removed"); } hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL); complete: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); unlock: hci_dev_unlock(hdev); return err; } static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_load_conn_param *cp = data; const u16 max_param_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_conn_param)); u16 param_count, expected_len; int i; if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_NOT_SUPPORTED); param_count = __le16_to_cpu(cp->param_count); if (param_count > max_param_count) { bt_dev_err(hdev, "load_conn_param: too big param_count value %u", param_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, params, param_count); if (expected_len != len) { bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "param_count %u", param_count); hci_dev_lock(hdev); hci_conn_params_clear_disabled(hdev); for (i = 0; i < param_count; i++) { struct mgmt_conn_param *param = &cp->params[i]; struct hci_conn_params *hci_param; u16 min, max, latency, timeout; u8 addr_type; bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr, param->addr.type); if (param->addr.type == BDADDR_LE_PUBLIC) { addr_type = ADDR_LE_DEV_PUBLIC; } else if (param->addr.type == BDADDR_LE_RANDOM) { addr_type = ADDR_LE_DEV_RANDOM; } else { bt_dev_err(hdev, "ignoring invalid connection parameters"); continue; } min = le16_to_cpu(param->min_interval); max = le16_to_cpu(param->max_interval); latency = le16_to_cpu(param->latency); timeout = le16_to_cpu(param->timeout); bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x", min, max, latency, timeout); if (hci_check_conn_params(min, max, latency, timeout) < 0) { bt_dev_err(hdev, "ignoring invalid 
connection parameters"); continue; } hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr, addr_type); if (!hci_param) { bt_dev_err(hdev, "failed to add connection parameters"); continue; } hci_param->conn_min_interval = min; hci_param->conn_max_interval = max; hci_param->conn_latency = latency; hci_param->supervision_timeout = timeout; } hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0); } static int set_external_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_external_config *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_REJECTED); if (cp->config != 0x00 && cp->config != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_INVALID_PARAMS); if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (cp->config) changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED); else changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED); err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev); if (err < 0) goto unlock; if (!changed) goto unlock; err = new_options(hdev, sk); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) { mgmt_index_removed(hdev); if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) { hci_dev_set_flag(hdev, HCI_CONFIG); hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } else { set_bit(HCI_RAW, &hdev->flags); mgmt_index_added(hdev); } } unlock: hci_dev_unlock(hdev); return err; } static int set_public_address(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_public_address *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_REJECTED); if (!bacmp(&cp->bdaddr, BDADDR_ANY)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); if (!hdev->set_bdaddr) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); changed = !!bacmp(&hdev->public_addr, &cp->bdaddr); bacpy(&hdev->public_addr, &cp->bdaddr); err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev); if (err < 0) goto unlock; if (!changed) goto unlock; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) err = new_options(hdev, sk); if (is_configured(hdev)) { mgmt_index_removed(hdev); hci_dev_clear_flag(hdev, HCI_UNCONFIGURED); hci_dev_set_flag(hdev, HCI_CONFIG); hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } unlock: hci_dev_unlock(hdev); return err; } static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data, int err) { const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp; struct mgmt_rp_read_local_oob_ext_data *mgmt_rp; u8 *h192, *r192, *h256, *r256; struct mgmt_pending_cmd *cmd = data; struct sk_buff *skb = cmd->skb; u8 status = mgmt_status(err); u16 eir_len; if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) return; if (!status) { if (!skb) status = MGMT_STATUS_FAILED; else if (IS_ERR(skb)) status = mgmt_status(PTR_ERR(skb)); else status = mgmt_status(skb->data[0]); } bt_dev_dbg(hdev, "status %u", status); mgmt_cp = cmd->param; if (status) { status = 
mgmt_status(status); eir_len = 0; h192 = NULL; r192 = NULL; h256 = NULL; r256 = NULL; } else if (!bredr_sc_enabled(hdev)) { struct hci_rp_read_local_oob_data *rp; if (skb->len != sizeof(*rp)) { status = MGMT_STATUS_FAILED; eir_len = 0; } else { status = MGMT_STATUS_SUCCESS; rp = (void *)skb->data; eir_len = 5 + 18 + 18; h192 = rp->hash; r192 = rp->rand; h256 = NULL; r256 = NULL; } } else { struct hci_rp_read_local_oob_ext_data *rp; if (skb->len != sizeof(*rp)) { status = MGMT_STATUS_FAILED; eir_len = 0; } else { status = MGMT_STATUS_SUCCESS; rp = (void *)skb->data; if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { eir_len = 5 + 18 + 18; h192 = NULL; r192 = NULL; } else { eir_len = 5 + 18 + 18 + 18 + 18; h192 = rp->hash192; r192 = rp->rand192; } h256 = rp->hash256; r256 = rp->rand256; } } mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL); if (!mgmt_rp) goto done; if (eir_len == 0) goto send_rsp; eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV, hdev->dev_class, 3); if (h192 && r192) { eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_HASH_C192, h192, 16); eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_RAND_R192, r192, 16); } if (h256 && r256) { eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_HASH_C256, h256, 16); eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_RAND_R256, r256, 16); } send_rsp: mgmt_rp->type = mgmt_cp->type; mgmt_rp->eir_len = cpu_to_le16(eir_len); err = mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status, mgmt_rp, sizeof(*mgmt_rp) + eir_len); if (err < 0 || status) goto done; hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS); err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, mgmt_rp, sizeof(*mgmt_rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); done: if (skb && !IS_ERR(skb)) kfree_skb(skb); kfree(mgmt_rp); mgmt_pending_remove(cmd); } static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp) { struct mgmt_pending_cmd *cmd; int err; cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, read_local_oob_ext_data_complete); if (err < 0) { mgmt_pending_remove(cmd); return err; } return 0; } static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_read_local_oob_ext_data *cp = data; struct mgmt_rp_read_local_oob_ext_data *rp; size_t rp_len; u16 eir_len; u8 status, flags, role, addr[7], hash[16], rand[16]; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) { switch (cp->type) { case BIT(BDADDR_BREDR): status = mgmt_bredr_support(hdev); if (status) eir_len = 0; else eir_len = 5; break; case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): status = mgmt_le_support(hdev); if (status) eir_len = 0; else eir_len = 9 + 3 + 18 + 18 + 3; break; default: status = MGMT_STATUS_INVALID_PARAMS; eir_len = 0; break; } } else { status = MGMT_STATUS_NOT_POWERED; eir_len = 0; } rp_len = sizeof(*rp) + eir_len; rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) return -ENOMEM; if (!status && !lmp_ssp_capable(hdev)) { status = MGMT_STATUS_NOT_SUPPORTED; eir_len = 0; } if (status) goto complete; hci_dev_lock(hdev); eir_len = 0; switch (cp->type) { case BIT(BDADDR_BREDR): if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { err = read_local_ssp_oob_req(hdev, sk, cp); hci_dev_unlock(hdev); if (!err) goto done; status = MGMT_STATUS_FAILED; goto complete; } else { eir_len = 
eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV, hdev->dev_class, 3); } break; case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && smp_generate_oob(hdev, hash, rand) < 0) { hci_dev_unlock(hdev); status = MGMT_STATUS_FAILED; goto complete; } /* This should return the active RPA, but since the RPA * is only programmed on demand, it is really hard to fill * this in at the moment. For now disallow retrieving * local out-of-band data when privacy is in use. * * Returning the identity address will not help here since * pairing happens before the identity resolving key is * known and thus the connection establishment happens * based on the RPA and not the identity address. */ if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { hci_dev_unlock(hdev); status = MGMT_STATUS_REJECTED; goto complete; } if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { memcpy(addr, &hdev->static_addr, 6); addr[6] = 0x01; } else { memcpy(addr, &hdev->bdaddr, 6); addr[6] = 0x00; } eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR, addr, sizeof(addr)); if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) role = 0x02; else role = 0x01; eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE, &role, sizeof(role)); if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) { eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_CONFIRM, hash, sizeof(hash)); eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_RANDOM, rand, sizeof(rand)); } flags = mgmt_get_adv_discov_flags(hdev); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS, &flags, sizeof(flags)); break; } hci_dev_unlock(hdev); hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); status = MGMT_STATUS_SUCCESS; complete: rp->type = cp->type; rp->eir_len = cpu_to_le16(eir_len); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status, rp, sizeof(*rp) + eir_len); if (err < 0 || status) goto done; err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, rp, sizeof(*rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, sk); done: kfree(rp); return err; } static u32 get_supported_adv_flags(struct hci_dev *hdev) { u32 flags = 0; flags |= MGMT_ADV_FLAG_CONNECTABLE; flags |= MGMT_ADV_FLAG_DISCOV; flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; flags |= MGMT_ADV_FLAG_MANAGED_FLAGS; flags |= MGMT_ADV_FLAG_APPEARANCE; flags |= MGMT_ADV_FLAG_LOCAL_NAME; flags |= MGMT_ADV_PARAM_DURATION; flags |= MGMT_ADV_PARAM_TIMEOUT; flags |= MGMT_ADV_PARAM_INTERVALS; flags |= MGMT_ADV_PARAM_TX_POWER; flags |= MGMT_ADV_PARAM_SCAN_RSP; /* In extended adv TX_POWER returned from Set Adv Param * will be always valid. 
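	 * For legacy advertising the flag is only offered when the
	 * controller reported a valid advertising TX power
	 * (hdev->adv_tx_power).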
*/ if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev)) flags |= MGMT_ADV_FLAG_TX_POWER; if (ext_adv_capable(hdev)) { flags |= MGMT_ADV_FLAG_SEC_1M; flags |= MGMT_ADV_FLAG_HW_OFFLOAD; flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER; if (le_2m_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_2M; if (le_coded_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_CODED; } return flags; } static int read_adv_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_read_adv_features *rp; size_t rp_len; int err; struct adv_info *adv_instance; u32 supported_flags; u8 *instance; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_REJECTED); hci_dev_lock(hdev); rp_len = sizeof(*rp) + hdev->adv_instance_cnt; rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { hci_dev_unlock(hdev); return -ENOMEM; } supported_flags = get_supported_adv_flags(hdev); rp->supported_flags = cpu_to_le32(supported_flags); rp->max_adv_data_len = max_adv_len(hdev); rp->max_scan_rsp_len = max_adv_len(hdev); rp->max_instances = hdev->le_num_of_adv_sets; rp->num_instances = hdev->adv_instance_cnt; instance = rp->instance; list_for_each_entry(adv_instance, &hdev->adv_instances, list) { /* Only instances 1-le_num_of_adv_sets are externally visible */ if (adv_instance->instance <= hdev->adv_instance_cnt) { *instance = adv_instance->instance; instance++; } else { rp->num_instances--; rp_len--; } } hci_dev_unlock(hdev); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_SUCCESS, rp, rp_len); kfree(rp); return err; } static u8 calculate_name_len(struct hci_dev *hdev) { u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; return eir_append_local_name(hdev, buf, 0); } static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, bool is_adv_data) { u8 max_len = max_adv_len(hdev); if (is_adv_data) { if (adv_flags & (MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV | MGMT_ADV_FLAG_MANAGED_FLAGS)) max_len -= 3; if (adv_flags & MGMT_ADV_FLAG_TX_POWER) max_len -= 3; } else { if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) max_len -= calculate_name_len(hdev); if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE)) max_len -= 4; } return max_len; } static bool flags_managed(u32 adv_flags) { return adv_flags & (MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV | MGMT_ADV_FLAG_MANAGED_FLAGS); } static bool tx_power_managed(u32 adv_flags) { return adv_flags & MGMT_ADV_FLAG_TX_POWER; } static bool name_managed(u32 adv_flags) { return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME; } static bool appearance_managed(u32 adv_flags) { return adv_flags & MGMT_ADV_FLAG_APPEARANCE; } static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, u8 len, bool is_adv_data) { int i, cur_len; u8 max_len; max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data); if (len > max_len) return false; /* Make sure that the data is correctly formatted. */ for (i = 0; i < len; i += (cur_len + 1)) { cur_len = data[i]; if (!cur_len) continue; if (data[i + 1] == EIR_FLAGS && (!is_adv_data || flags_managed(adv_flags))) return false; if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags)) return false; if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags)) return false; if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags)) return false; if (data[i + 1] == EIR_APPEARANCE && appearance_managed(adv_flags)) return false; /* If the current field length would exceed the total data * length, then it's invalid. 
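		 * (cur_len does not include the length octet itself, which is
		 * why the loop advances by cur_len + 1.)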
*/ if (i + cur_len >= len) return false; } return true; } static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags) { u32 supported_flags, phy_flags; /* The current implementation only supports a subset of the specified * flags. Also need to check mutual exclusiveness of sec flags. */ supported_flags = get_supported_adv_flags(hdev); phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK; if (adv_flags & ~supported_flags || ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) return false; return true; } static bool adv_busy(struct hci_dev *hdev) { return pending_find(MGMT_OP_SET_LE, hdev); } static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance, int err) { struct adv_info *adv, *n; bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { u8 instance; if (!adv->pending) continue; if (!err) { adv->pending = false; continue; } instance = adv->instance; if (hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); hci_remove_adv_instance(hdev, instance); mgmt_advertising_removed(sk, hdev, instance); } hci_dev_unlock(hdev); } static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_advertising *cp = cmd->param; struct mgmt_rp_add_advertising rp; memset(&rp, 0, sizeof(rp)); rp.instance = cp->instance; if (err) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); add_adv_complete(hdev, cmd->sk, cp->instance, err); mgmt_pending_free(cmd); } static int add_advertising_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_advertising *cp = cmd->param; return hci_schedule_adv_instance_sync(hdev, cp->instance, true); } static int add_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_advertising *cp = data; struct mgmt_rp_add_advertising rp; u32 flags; u8 status; u16 timeout, duration; unsigned int prev_instance_cnt; u8 schedule_instance = 0; struct adv_info *adv, *next_instance; int err; struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); timeout = __le16_to_cpu(cp->timeout); duration = __le16_to_cpu(cp->duration); if (!requested_adv_flags_are_valid(hdev, flags)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (timeout && !hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_REJECTED); goto unlock; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) || !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len, cp->scan_rsp_len, false)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } prev_instance_cnt = hdev->adv_instance_cnt; adv = 
hci_add_adv_instance(hdev, cp->instance, flags, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len, timeout, duration, HCI_ADV_TX_POWER_NO_PREFERENCE, hdev->le_adv_min_interval, hdev->le_adv_max_interval, 0); if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); goto unlock; } /* Only trigger an advertising added event if a new instance was * actually added. */ if (hdev->adv_instance_cnt > prev_instance_cnt) mgmt_advertising_added(sk, hdev, cp->instance); if (hdev->cur_adv_instance == cp->instance) { /* If the currently advertised instance is being changed then * cancel the current advertising and schedule the next * instance. If there is only one instance then the overridden * advertising data will be visible right away. */ cancel_adv_timeout(hdev); next_instance = hci_get_next_instance(hdev, cp->instance); if (next_instance) schedule_instance = next_instance->instance; } else if (!hdev->adv_instance_timeout) { /* Immediately advertise the new instance if no other * instance is currently being advertised. */ schedule_instance = cp->instance; } /* If the HCI_ADVERTISING flag is set or the device isn't powered or * there is no instance to be advertised then we have no HCI * communication to make. Simply return. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { rp.instance = cp->instance; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); goto unlock; } /* We're good to go, update advertising data, parameters, and start * advertising. */ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } cp->instance = schedule_instance; err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd, add_advertising_complete); if (err < 0) mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_ext_adv_params *cp = cmd->param; struct mgmt_rp_add_ext_adv_params rp; struct adv_info *adv; u32 flags; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, cp->instance); if (!adv) goto unlock; rp.instance = cp->instance; rp.tx_power = adv->tx_power; /* While we're at it, inform userspace of the available space for this * advertisement, given the flags that will be used. 
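	 * tlv_data_max_len() subtracts the space reserved for any fields the
	 * kernel appends itself (flags, TX power, local name, appearance).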
*/ flags = __le32_to_cpu(cp->flags); rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); if (err) { /* If this advertisement was previously advertising and we * failed to update it, we signal that it has been removed and * delete its structure */ if (!adv->pending) mgmt_advertising_removed(cmd->sk, hdev, cp->instance); hci_remove_adv_instance(hdev, cp->instance); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err)); } else { mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); } unlock: if (cmd) mgmt_pending_free(cmd); hci_dev_unlock(hdev); } static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_ext_adv_params *cp = cmd->param; return hci_setup_ext_adv_instance_sync(hdev, cp->instance); } static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; struct adv_info *adv; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; s8 tx_power; int err; BT_DBG("%s", hdev->name); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, status); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); /* The purpose of breaking add_advertising into two separate MGMT calls * for params and data is to allow more parameters to be added to this * structure in the future. For this reason, we verify that we have the * bare minimum structure we know of when the interface was defined. Any * extra parameters we don't know about will be ignored in this request. */ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); if (!requested_adv_flags_are_valid(hdev, flags)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); /* In new interface, we require that we are powered to register */ if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_REJECTED); goto unlock; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_BUSY); goto unlock; } /* Parse defined parameters from request, use defaults otherwise */ timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ? __le16_to_cpu(cp->timeout) : 0; duration = (flags & MGMT_ADV_PARAM_DURATION) ? __le16_to_cpu(cp->duration) : hdev->def_multi_adv_rotation_duration; min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? __le32_to_cpu(cp->min_interval) : hdev->le_adv_min_interval; max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? __le32_to_cpu(cp->max_interval) : hdev->le_adv_max_interval; tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ? 
cp->tx_power : HCI_ADV_TX_POWER_NO_PREFERENCE; /* Create advertising instance with no advertising or response data */ adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL, timeout, duration, tx_power, min_interval, max_interval, 0); if (IS_ERR(adv)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_FAILED); goto unlock; } /* Submit request for advertising params if ext adv available */ if (ext_adv_capable(hdev)) { cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev, data, data_len); if (!cmd) { err = -ENOMEM; hci_remove_adv_instance(hdev, cp->instance); goto unlock; } err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd, add_ext_adv_params_complete); if (err < 0) mgmt_pending_free(cmd); } else { rp.instance = cp->instance; rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } unlock: hci_dev_unlock(hdev); return err; } static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_ext_adv_data *cp = cmd->param; struct mgmt_rp_add_advertising rp; add_adv_complete(hdev, cmd->sk, cp->instance, err); memset(&rp, 0, sizeof(rp)); rp.instance = cp->instance; if (err) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), &rp, sizeof(rp)); mgmt_pending_free(cmd); } static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_add_ext_adv_data *cp = cmd->param; int err; if (ext_adv_capable(hdev)) { err = hci_update_adv_data_sync(hdev, cp->instance); if (err) return err; err = hci_update_scan_rsp_data_sync(hdev, cp->instance); if (err) return err; return hci_enable_ext_advertising_sync(hdev, cp->instance); } return hci_schedule_adv_instance_sync(hdev, cp->instance, true); } static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_data *cp = data; struct mgmt_rp_add_ext_adv_data rp; u8 schedule_instance = 0; struct adv_info *next_instance; struct adv_info *adv_instance; int err = 0; struct mgmt_pending_cmd *cmd; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); adv_instance = hci_find_adv_instance(hdev, cp->instance); if (!adv_instance) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_INVALID_PARAMS); goto unlock; } /* In new interface, we require that we are powered to register */ if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_REJECTED); goto clear_new_instance; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_BUSY); goto clear_new_instance; } /* Validate new data */ if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data, cp->adv_data_len, true) || !tlv_data_is_valid(hdev, adv_instance->flags, cp->data + cp->adv_data_len, cp->scan_rsp_len, false)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_INVALID_PARAMS); goto clear_new_instance; } /* Set the data in the advertising instance */ hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len); /* If using software rotation, determine next instance to use */ if 
(hdev->cur_adv_instance == cp->instance) { /* If the currently advertised instance is being changed * then cancel the current advertising and schedule the * next instance. If there is only one instance then the * overridden advertising data will be visible right * away */ cancel_adv_timeout(hdev); next_instance = hci_get_next_instance(hdev, cp->instance); if (next_instance) schedule_instance = next_instance->instance; } else if (!hdev->adv_instance_timeout) { /* Immediately advertise the new instance if no other * instance is currently being advertised. */ schedule_instance = cp->instance; } /* If the HCI_ADVERTISING flag is set or there is no instance to * be advertised then we have no HCI communication to make. * Simply return. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { if (adv_instance->pending) { mgmt_advertising_added(sk, hdev, cp->instance); adv_instance->pending = false; } rp.instance = cp->instance; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto clear_new_instance; } err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd, add_ext_adv_data_complete); if (err < 0) { mgmt_pending_free(cmd); goto clear_new_instance; } /* We were successful in updating data, so trigger advertising_added * event if this is an instance that wasn't previously advertising. If * a failure occurs in the requests we initiated, we will remove the * instance again in add_advertising_complete */ if (adv_instance->pending) mgmt_advertising_added(sk, hdev, cp->instance); goto unlock; clear_new_instance: hci_remove_adv_instance(hdev, cp->instance); unlock: hci_dev_unlock(hdev); return err; } static void remove_advertising_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_advertising *cp = cmd->param; struct mgmt_rp_remove_advertising rp; bt_dev_dbg(hdev, "err %d", err); memset(&rp, 0, sizeof(rp)); rp.instance = cp->instance; if (err) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); mgmt_pending_free(cmd); } static int remove_advertising_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; struct mgmt_cp_remove_advertising *cp = cmd->param; int err; err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true); if (err) return err; if (list_empty(&hdev->adv_instances)) err = hci_disable_advertising_sync(hdev); return err; } static int remove_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_remove_advertising *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } if (pending_find(MGMT_OP_SET_LE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } if (list_empty(&hdev->adv_instances)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd, 
remove_advertising_complete); if (err < 0) mgmt_pending_free(cmd); unlock: hci_dev_unlock(hdev); return err; } static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_get_adv_size_info *cp = data; struct mgmt_rp_get_adv_size_info rp; u32 flags, supported_flags; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_REJECTED); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); /* The current implementation only supports a subset of the specified * flags. */ supported_flags = get_supported_adv_flags(hdev); if (flags & ~supported_flags) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_INVALID_PARAMS); rp.instance = cp->instance; rp.flags = cp->flags; rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } static const struct hci_mgmt_handler mgmt_handlers[] = { { NULL }, /* 0x0000 (no command) */ { read_version, MGMT_READ_VERSION_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_commands, MGMT_READ_COMMANDS_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_index_list, MGMT_READ_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_controller_info, MGMT_READ_INFO_SIZE, HCI_MGMT_UNTRUSTED }, { set_powered, MGMT_SETTING_SIZE }, { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE }, { set_connectable, MGMT_SETTING_SIZE }, { set_fast_connectable, MGMT_SETTING_SIZE }, { set_bondable, MGMT_SETTING_SIZE }, { set_link_security, MGMT_SETTING_SIZE }, { set_ssp, MGMT_SETTING_SIZE }, { set_hs, MGMT_SETTING_SIZE }, { set_le, MGMT_SETTING_SIZE }, { set_dev_class, MGMT_SET_DEV_CLASS_SIZE }, { set_local_name, MGMT_SET_LOCAL_NAME_SIZE }, { add_uuid, MGMT_ADD_UUID_SIZE }, { remove_uuid, MGMT_REMOVE_UUID_SIZE }, { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { disconnect, MGMT_DISCONNECT_SIZE }, { get_connections, MGMT_GET_CONNECTIONS_SIZE }, { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE }, { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE }, { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE }, { pair_device, MGMT_PAIR_DEVICE_SIZE }, { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE }, { unpair_device, MGMT_UNPAIR_DEVICE_SIZE }, { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE }, { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE }, { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE }, { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE, HCI_MGMT_VAR_LEN }, { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, { start_discovery, MGMT_START_DISCOVERY_SIZE }, { stop_discovery, MGMT_STOP_DISCOVERY_SIZE }, { confirm_name, MGMT_CONFIRM_NAME_SIZE }, { block_device, MGMT_BLOCK_DEVICE_SIZE }, { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE }, { set_device_id, MGMT_SET_DEVICE_ID_SIZE }, { set_advertising, MGMT_SETTING_SIZE }, { set_bredr, MGMT_SETTING_SIZE }, { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE }, { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE }, { set_secure_conn, MGMT_SETTING_SIZE }, { set_debug_keys, 
MGMT_SETTING_SIZE }, { set_privacy, MGMT_SET_PRIVACY_SIZE }, { load_irks, MGMT_LOAD_IRKS_SIZE, HCI_MGMT_VAR_LEN }, { get_conn_info, MGMT_GET_CONN_INFO_SIZE }, { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE }, { add_device, MGMT_ADD_DEVICE_SIZE }, { remove_device, MGMT_REMOVE_DEVICE_SIZE }, { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE, HCI_MGMT_VAR_LEN }, { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_config_info, MGMT_READ_CONFIG_INFO_SIZE, HCI_MGMT_UNCONFIGURED | HCI_MGMT_UNTRUSTED }, { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE, HCI_MGMT_UNCONFIGURED }, { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE, HCI_MGMT_UNCONFIGURED }, { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE, HCI_MGMT_VAR_LEN }, { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE }, { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE }, { add_advertising, MGMT_ADD_ADVERTISING_SIZE, HCI_MGMT_VAR_LEN }, { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE }, { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE }, { start_limited_discovery, MGMT_START_DISCOVERY_SIZE }, { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE, HCI_MGMT_UNTRUSTED }, { set_appearance, MGMT_SET_APPEARANCE_SIZE }, { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { set_wideband_speech, MGMT_SETTING_SIZE }, { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE, HCI_MGMT_UNTRUSTED }, { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE, HCI_MGMT_UNTRUSTED | HCI_MGMT_HDEV_OPTIONAL }, { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE, HCI_MGMT_VAR_LEN | HCI_MGMT_HDEV_OPTIONAL }, { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE, HCI_MGMT_UNTRUSTED }, { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_UNTRUSTED }, { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE, HCI_MGMT_VAR_LEN }, { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE, HCI_MGMT_VAR_LEN }, { add_adv_patterns_monitor_rssi, MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, HCI_MGMT_VAR_LEN }, { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE, HCI_MGMT_VAR_LEN }, { mesh_features, MGMT_MESH_READ_FEATURES_SIZE }, { mesh_send, MGMT_MESH_SEND_SIZE, HCI_MGMT_VAR_LEN }, { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; switch (hdev->dev_type) { case HCI_PRIMARY: if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); ev.type = 0x01; } else { mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, HCI_MGMT_INDEX_EVENTS); ev.type = 0x00; } break; case HCI_AMP: ev.type = 0x02; break; default: return; } ev.bus = hdev->bus; mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); } void 
mgmt_index_removed(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; u8 status = MGMT_STATUS_INVALID_INDEX; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; switch (hdev->dev_type) { case HCI_PRIMARY: mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); ev.type = 0x01; } else { mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, HCI_MGMT_INDEX_EVENTS); ev.type = 0x00; } break; case HCI_AMP: ev.type = 0x02; break; default: return; } ev.bus = hdev->bus; mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); /* Cancel any remaining timed work */ if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->service_cache); cancel_delayed_work_sync(&hdev->rpa_expired); } void mgmt_power_on(struct hci_dev *hdev, int err) { struct cmd_lookup match = { NULL, hdev }; bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); if (!err) { restart_le_actions(hdev); hci_update_passive_scan(hdev); } mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); hci_dev_unlock(hdev); } void __mgmt_power_off(struct hci_dev *hdev) { struct cmd_lookup match = { NULL, hdev }; u8 status, zero_cod[] = { 0, 0, 0 }; mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); /* If the power off is because of hdev unregistration let * use the appropriate INVALID_INDEX status. Otherwise use * NOT_POWERED. We cover both scenarios here since later in * mgmt_index_removed() any hci_conn callbacks will have already * been triggered, potentially causing misleading DISCONNECTED * status responses. 
*/ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) status = MGMT_STATUS_INVALID_INDEX; else status = MGMT_STATUS_NOT_POWERED; mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, zero_cod, sizeof(zero_cod), HCI_MGMT_DEV_CLASS_EVENTS, NULL); ext_info_changed(hdev, NULL); } new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); } void mgmt_set_powered_failed(struct hci_dev *hdev, int err) { struct mgmt_pending_cmd *cmd; u8 status; cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return; if (err == -ERFKILL) status = MGMT_STATUS_RFKILLED; else status = MGMT_STATUS_FAILED; mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); mgmt_pending_remove(cmd); } void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) { struct mgmt_ev_new_link_key ev; memset(&ev, 0, sizeof(ev)); ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); ev.key.addr.type = BDADDR_BREDR; ev.key.type = key->type; memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); ev.key.pin_len = key->pin_len; mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); } static u8 mgmt_ltk_type(struct smp_ltk *ltk) { switch (ltk->type) { case SMP_LTK: case SMP_LTK_RESPONDER: if (ltk->authenticated) return MGMT_LTK_AUTHENTICATED; return MGMT_LTK_UNAUTHENTICATED; case SMP_LTK_P256: if (ltk->authenticated) return MGMT_LTK_P256_AUTH; return MGMT_LTK_P256_UNAUTH; case SMP_LTK_P256_DEBUG: return MGMT_LTK_P256_DEBUG; } return MGMT_LTK_UNAUTHENTICATED; } void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) { struct mgmt_ev_new_long_term_key ev; memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses * without providing an identity resolving key don't require * to store long term keys. Their addresses will change the * next time around. * * Only when a remote device provides an identity address * make sure the long term key is stored. If the remote * identity is known, the long term keys are internally * mapped to the identity address. So allow static random * and public addresses here. */ if (key->bdaddr_type == ADDR_LE_DEV_RANDOM && (key->bdaddr.b[5] & 0xc0) != 0xc0) ev.store_hint = 0x00; else ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); ev.key.type = mgmt_ltk_type(key); ev.key.enc_size = key->enc_size; ev.key.ediv = key->ediv; ev.key.rand = key->rand; if (key->type == SMP_LTK) ev.key.initiator = 1; /* Make sure we copy only the significant bytes based on the * encryption key size, and set the rest of the value to zeroes. 
*/ memcpy(ev.key.val, key->val, key->enc_size); memset(ev.key.val + key->enc_size, 0, sizeof(ev.key.val) - key->enc_size); mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent) { struct mgmt_ev_new_irk ev; memset(&ev, 0, sizeof(ev)); ev.store_hint = persistent; bacpy(&ev.rpa, &irk->rpa); bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); memcpy(ev.irk.val, irk->val, sizeof(irk->val)); mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent) { struct mgmt_ev_new_csrk ev; memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses * without providing an identity resolving key don't require * to store signature resolving keys. Their addresses will change * the next time around. * * Only when a remote device provides an identity address * make sure the signature resolving key is stored. So allow * static random and public addresses here. */ if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM && (csrk->bdaddr.b[5] & 0xc0) != 0xc0) ev.store_hint = 0x00; else ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); ev.key.type = csrk->type; memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout) { struct mgmt_ev_new_conn_param ev; if (!hci_is_identity_address(bdaddr, bdaddr_type)) return; memset(&ev, 0, sizeof(ev)); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type); ev.store_hint = store_hint; ev.min_interval = cpu_to_le16(min_interval); ev.max_interval = cpu_to_le16(max_interval); ev.latency = cpu_to_le16(latency); ev.timeout = cpu_to_le16(timeout); mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL); } void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len) { struct sk_buff *skb; struct mgmt_ev_device_connected *ev; u16 eir_len = 0; u32 flags = 0; /* allocate buff for LE or BR/EDR adv */ if (conn->le_adv_data_len > 0) skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, sizeof(*ev) + conn->le_adv_data_len); else skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + eir_precalc_len(sizeof(conn->dev_class))); ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); if (conn->out) flags |= MGMT_DEV_FOUND_INITIATED_CONN; ev->flags = __cpu_to_le32(flags); /* We must ensure that the EIR Data fields are ordered and * unique. Keep it simple for now and avoid the problem by not * adding any BR/EDR data to the LE adv. 
*/ if (conn->le_adv_data_len > 0) { skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len); eir_len = conn->le_adv_data_len; } else { if (name) eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class))) eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV, conn->dev_class, sizeof(conn->dev_class)); } ev->eir_len = cpu_to_le16(eir_len); mgmt_event_skb(skb, NULL); } static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct sock **sk = data; cmd->cmd_complete(cmd, 0); *sk = cmd->sk; sock_hold(*sk); mgmt_pending_remove(cmd); } static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; struct mgmt_cp_unpair_device *cp = cmd->param; device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, 0); mgmt_pending_remove(cmd); } bool mgmt_powering_down(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return false; cp = cmd->param; if (!cp->val) return true; return false; } void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected) { struct mgmt_ev_device_disconnected ev; struct sock *sk = NULL; /* The connection is still in hci_conn_hash so test for 1 * instead of 0 to know if this is the last one. */ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); } if (!mgmt_connected) return; if (link_type != ACL_LINK && link_type != LE_LINK) return; mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.reason = reason; /* Report disconnects due to suspend */ if (hdev->suspended) ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND; mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk); if (sk) sock_put(sk); mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, hdev); } void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); struct mgmt_cp_disconnect *cp; struct mgmt_pending_cmd *cmd; mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, hdev); cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) return; cp = cmd->param; if (bacmp(bdaddr, &cp->addr.bdaddr)) return; if (cp->addr.type != bdaddr_type) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { struct mgmt_ev_connect_failed ev; /* The connection is still in hci_conn_hash so test for 1 * instead of 0 to know if this is the last one. 
*/ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); } bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.status = mgmt_status(status); mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); } void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) { struct mgmt_ev_pin_code_request ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = BDADDR_BREDR; ev.secure = secure; mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL); } void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct mgmt_pending_cmd *cmd; cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); if (!cmd) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct mgmt_pending_cmd *cmd; cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); if (!cmd) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 value, u8 confirm_hint) { struct mgmt_ev_user_confirm_request ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.confirm_hint = confirm_hint; ev.value = cpu_to_le32(value); return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), NULL); } int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type) { struct mgmt_ev_user_passkey_request ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), NULL); } static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status, u8 opcode) { struct mgmt_pending_cmd *cmd; cmd = pending_find(opcode, hdev); if (!cmd) return -ENOENT; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); return 0; } int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_CONFIRM_REPLY); } int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_PASSKEY_REPLY); } int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_PASSKEY_NEG_REPLY); } int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered) { struct mgmt_ev_passkey_notify ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.passkey = __cpu_to_le32(passkey); ev.entered = entered; return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), 
NULL); } void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) { struct mgmt_ev_auth_failed ev; struct mgmt_pending_cmd *cmd; u8 status = mgmt_status(hci_status); bacpy(&ev.addr.bdaddr, &conn->dst); ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev.status = status; cmd = find_pairing(conn); mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev), cmd ? cmd->sk : NULL); if (cmd) { cmd->cmd_complete(cmd, status); mgmt_pending_remove(cmd); } } void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) { struct cmd_lookup match = { NULL, hdev }; bool changed; if (status) { u8 mgmt_err = mgmt_status(status); mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, cmd_status_rsp, &mgmt_err); return; } if (test_bit(HCI_AUTH, &hdev->flags)) changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY); else changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); } static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } } void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status) { struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); if (!status) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL); ext_info_changed(hdev, NULL); } if (match.sk) sock_put(match.sk); } void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) { struct mgmt_cp_set_local_name ev; struct mgmt_pending_cmd *cmd; if (status) return; memset(&ev, 0, sizeof(ev)); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) { memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); /* If this is a HCI command related to powering on the * HCI dev don't send any mgmt signals. */ if (pending_find(MGMT_OP_SET_POWERED, hdev)) return; } mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL); ext_info_changed(hdev, cmd ? 
cmd->sk : NULL); } static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) { int i; for (i = 0; i < uuid_count; i++) { if (!memcmp(uuid, uuids[i], 16)) return true; } return false; } static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) { u16 parsed = 0; while (parsed < eir_len) { u8 field_len = eir[0]; u8 uuid[16]; int i; if (field_len == 0) break; if (eir_len - parsed < field_len + 1) break; switch (eir[1]) { case EIR_UUID16_ALL: case EIR_UUID16_SOME: for (i = 0; i + 3 <= field_len; i += 2) { memcpy(uuid, bluetooth_base_uuid, 16); uuid[13] = eir[i + 3]; uuid[12] = eir[i + 2]; if (has_uuid(uuid, uuid_count, uuids)) return true; } break; case EIR_UUID32_ALL: case EIR_UUID32_SOME: for (i = 0; i + 5 <= field_len; i += 4) { memcpy(uuid, bluetooth_base_uuid, 16); uuid[15] = eir[i + 5]; uuid[14] = eir[i + 4]; uuid[13] = eir[i + 3]; uuid[12] = eir[i + 2]; if (has_uuid(uuid, uuid_count, uuids)) return true; } break; case EIR_UUID128_ALL: case EIR_UUID128_SOME: for (i = 0; i + 17 <= field_len; i += 16) { memcpy(uuid, eir + i + 2, 16); if (has_uuid(uuid, uuid_count, uuids)) return true; } break; } parsed += field_len + 1; eir += field_len + 1; } return false; } static void restart_le_scan(struct hci_dev *hdev) { /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, hdev->discovery.scan_start + hdev->discovery.scan_duration)) return; queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart, DISCOV_LE_RESTART_DELAY); } static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { /* If a RSSI threshold has been specified, and * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with * a RSSI smaller than the RSSI threshold will be dropped. If the quirk * is set, let it through for further processing, as we might need to * restart the scan. * * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, * the results are also dropped. */ if (hdev->discovery.rssi != HCI_RSSI_INVALID && (rssi == HCI_RSSI_INVALID || (rssi < hdev->discovery.rssi && !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) return false; if (hdev->discovery.uuid_count != 0) { /* If a list of UUIDs is provided in filter, results with no * matching UUID should be dropped. */ if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count, hdev->discovery.uuids) && !eir_has_uuids(scan_rsp, scan_rsp_len, hdev->discovery.uuid_count, hdev->discovery.uuids)) return false; } /* If duplicate filtering does not report RSSI changes, then restart * scanning to ensure updated result with updated RSSI values. */ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) { restart_le_scan(hdev); /* Validate RSSI value against the RSSI threshold once more. 
*/ if (hdev->discovery.rssi != HCI_RSSI_INVALID && rssi < hdev->discovery.rssi) return false; } return true; } void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type) { struct mgmt_ev_adv_monitor_device_lost ev; ev.monitor_handle = cpu_to_le16(handle); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = addr_type; mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev), NULL); } static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev, struct sk_buff *skb, struct sock *skip_sk, u16 handle) { struct sk_buff *advmon_skb; size_t advmon_skb_len; __le16 *monitor_handle; if (!skb) return; advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) - sizeof(struct mgmt_ev_device_found)) + skb->len; advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND, advmon_skb_len); if (!advmon_skb) return; /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and * store monitor_handle of the matched monitor. */ monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle)); *monitor_handle = cpu_to_le16(handle); skb_put_data(advmon_skb, skb->data, skb->len); mgmt_event_skb(advmon_skb, skip_sk); } static void mgmt_adv_monitor_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, bool report_device, struct sk_buff *skb, struct sock *skip_sk) { struct monitored_device *dev, *tmp; bool matched = false; bool notified = false; /* We have received the Advertisement Report because: * 1. the kernel has initiated active discovery * 2. if not, we have pend_le_reports > 0 in which case we are doing * passive scanning * 3. if none of the above is true, we have one or more active * Advertisement Monitor * * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND * and report ONLY one advertisement per device for the matched Monitor * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. * * For case 3, since we are not active scanning and all advertisements * received are due to a matched Advertisement Monitor, report all * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. */ if (report_device && !hdev->advmon_pend_notify) { mgmt_event_skb(skb, skip_sk); return; } hdev->advmon_pend_notify = false; list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { if (!bacmp(&dev->bdaddr, bdaddr)) { matched = true; if (!dev->notified) { mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, dev->handle); notified = true; dev->notified = true; } } if (!dev->notified) hdev->advmon_pend_notify = true; } if (!report_device && ((matched && !notified) || !msft_monitor_supported(hdev))) { /* Handle 0 indicates that we are not active scanning and this * is a subsequent advertisement report for an already matched * Advertisement Monitor or the controller offloading support * is not available. 
*/ mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0); } if (report_device) mgmt_event_skb(skb, skip_sk); else kfree_skb(skb); } static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u64 instant) { struct sk_buff *skb; struct mgmt_ev_mesh_device_found *ev; int i, j; if (!hdev->mesh_ad_types[0]) goto accepted; /* Scan for requested AD types */ if (eir_len > 0) { for (i = 0; i + 1 < eir_len; i += eir[i] + 1) { for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { if (!hdev->mesh_ad_types[j]) break; if (hdev->mesh_ad_types[j] == eir[i + 1]) goto accepted; } } } if (scan_rsp_len > 0) { for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) { for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { if (!hdev->mesh_ad_types[j]) break; if (hdev->mesh_ad_types[j] == scan_rsp[i + 1]) goto accepted; } } } return; accepted: skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND, sizeof(*ev) + eir_len + scan_rsp_len); if (!skb) return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(LE_LINK, addr_type); ev->rssi = rssi; ev->flags = cpu_to_le32(flags); ev->instant = cpu_to_le64(instant); if (eir_len > 0) /* Copy EIR or advertising data into event */ skb_put_data(skb, eir, eir_len); if (scan_rsp_len > 0) /* Append scan response data to event */ skb_put_data(skb, scan_rsp, scan_rsp_len); ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); mgmt_event_skb(skb, NULL); } void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u64 instant) { struct sk_buff *skb; struct mgmt_ev_device_found *ev; bool report_device = hci_discovery_active(hdev); if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK) mesh_device_found(hdev, bdaddr, addr_type, rssi, flags, eir, eir_len, scan_rsp, scan_rsp_len, instant); /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which * case we're doing passive scanning and want these events. */ if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports)) report_device = true; else if (!hci_is_adv_monitoring(hdev)) return; } if (hdev->discovery.result_filtering) { /* We are using service discovery */ if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, scan_rsp_len)) return; } if (hdev->discovery.limited) { /* Check for limited discoverable bit */ if (dev_class) { if (!(dev_class[1] & 0x20)) return; } else { u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL); if (!flags || !(flags[0] & LE_AD_LIMITED)) return; } } /* Allocate skb. The 5 extra bytes are for the potential CoD field */ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + eir_len + scan_rsp_len + 5); if (!skb) return; ev = skb_put(skb, sizeof(*ev)); /* In case of device discovery with BR/EDR devices (pre 1.2), the * RSSI value was reported as 0 when not available. This behavior * is kept when using device discovery. This is required for full * backwards compatibility with the API. * * However when using service discovery, the value 127 will be * returned when the RSSI is not available. 
*/ if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi && link_type == ACL_LINK) rssi = 0; bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; ev->flags = cpu_to_le32(flags); if (eir_len > 0) /* Copy EIR or advertising data into event */ skb_put_data(skb, eir, eir_len); if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) { u8 eir_cod[5]; eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV, dev_class, 3); skb_put_data(skb, eir_cod, sizeof(eir_cod)); } if (scan_rsp_len > 0) /* Append scan response data to event */ skb_put_data(skb, scan_rsp, scan_rsp_len); ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL); } void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len) { struct sk_buff *skb; struct mgmt_ev_device_found *ev; u16 eir_len = 0; u32 flags = 0; skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0)); ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; if (name) eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); else flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED; ev->eir_len = cpu_to_le16(eir_len); ev->flags = cpu_to_le32(flags); mgmt_event_skb(skb, NULL); } void mgmt_discovering(struct hci_dev *hdev, u8 discovering) { struct mgmt_ev_discovering ev; bt_dev_dbg(hdev, "discovering %u", discovering); memset(&ev, 0, sizeof(ev)); ev.type = hdev->discovery.type; ev.discovering = discovering; mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); } void mgmt_suspending(struct hci_dev *hdev, u8 state) { struct mgmt_ev_controller_suspend ev; ev.suspend_state = state; mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL); } void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, u8 addr_type) { struct mgmt_ev_controller_resume ev; ev.wake_reason = reason; if (bdaddr) { bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = addr_type; } else { memset(&ev.addr, 0, sizeof(ev.addr)); } mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL); } static struct hci_mgmt_chan chan = { .channel = HCI_CHANNEL_CONTROL, .handler_count = ARRAY_SIZE(mgmt_handlers), .handlers = mgmt_handlers, .hdev_init = mgmt_init_hdev, }; int mgmt_init(void) { return hci_mgmt_chan_register(&chan); } void mgmt_exit(void) { hci_mgmt_chan_unregister(&chan); } void mgmt_cleanup(struct sock *sk) { struct mgmt_mesh_tx *mesh_tx; struct hci_dev *hdev; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { do { mesh_tx = mgmt_mesh_next(hdev, sk); if (mesh_tx) mesh_send_complete(hdev, mesh_tx, true); } while (mesh_tx); } read_unlock(&hci_dev_list_lock); }
linux-master
net/bluetooth/mgmt.c
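The mgmt_handlers table in mgmt.c above dispatches management commands that arrive over the HCI control channel, each prefixed by a 6-byte header carrying the opcode, controller index and parameter length in little-endian order. The following standalone userspace sketch (not part of the kernel source above) shows that framing for MGMT_OP_READ_VERSION; the socket constants and header layouts are re-declared locally to keep it self-contained and should be checked against the real UAPI headers (mgmt.h, hci_sock.h) before use.

/* Userspace sketch: send MGMT_OP_READ_VERSION over the HCI control channel.
 * Constants below mirror the kernel UAPI headers as an assumption; verify
 * them against the authoritative headers before relying on this.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#endif
#define BTPROTO_HCI          1
#define HCI_DEV_NONE         0xffff
#define HCI_CHANNEL_CONTROL  3

#define MGMT_OP_READ_VERSION 0x0001   /* handled by read_version() above  */
#define MGMT_INDEX_NONE      0xffff   /* command is HCI_MGMT_NO_HDEV      */

struct sockaddr_hci {                 /* layout as in the kernel hci_sock */
	sa_family_t    hci_family;
	unsigned short hci_dev;
	unsigned short hci_channel;
};

struct mgmt_hdr {                     /* same 6-byte header the kernel parses */
	uint16_t opcode;              /* little-endian on the wire            */
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

int main(void)
{
	struct sockaddr_hci addr = {
		.hci_family  = AF_BLUETOOTH,
		.hci_dev     = HCI_DEV_NONE,
		.hci_channel = HCI_CHANNEL_CONTROL,
	};
	struct mgmt_hdr hdr = {
		.opcode = MGMT_OP_READ_VERSION,  /* assumes a little-endian host */
		.index  = MGMT_INDEX_NONE,
		.len    = 0,                     /* MGMT_READ_VERSION_SIZE == 0  */
	};
	unsigned char evt[64];
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("mgmt socket");   /* some commands need CAP_NET_ADMIN */
		return 1;
	}
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
		perror("write");
		return 1;
	}
	/* The reply arrives as a command-complete mgmt event on the same fd. */
	ssize_t n = read(fd, evt, sizeof(evt));
	printf("received %zd bytes of mgmt event\n", n);
	close(fd);
	return 0;
}

Since READ_VERSION is marked HCI_MGMT_UNTRUSTED and HCI_MGMT_NO_HDEV in the table above, it takes no parameters and is addressed to no particular controller; the kernel answers with a command-complete event carrying the protocol version and revision, which is what the final read() returns here.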
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. Copyright (c) 2011,2012 Intel Corp. */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "hci_request.h" #include "a2mp.h" #include "amp.h" #define A2MP_FEAT_EXT 0x8000 /* Global AMP Manager list */ static LIST_HEAD(amp_mgr_list); static DEFINE_MUTEX(amp_mgr_list_lock); /* A2MP build & send command helper functions */ static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) { struct a2mp_cmd *cmd; int plen; plen = sizeof(*cmd) + len; cmd = kzalloc(plen, GFP_KERNEL); if (!cmd) return NULL; cmd->code = code; cmd->ident = ident; cmd->len = cpu_to_le16(len); memcpy(cmd->data, data, len); return cmd; } static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) { struct l2cap_chan *chan = mgr->a2mp_chan; struct a2mp_cmd *cmd; u16 total_len = len + sizeof(*cmd); struct kvec iv; struct msghdr msg; cmd = __a2mp_build(code, ident, len, data); if (!cmd) return; iv.iov_base = cmd; iv.iov_len = total_len; memset(&msg, 0, sizeof(msg)); iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len); l2cap_chan_send(chan, &msg, total_len); kfree(cmd); } static u8 __next_ident(struct amp_mgr *mgr) { if (++mgr->ident == 0) mgr->ident = 1; return mgr->ident; } static struct amp_mgr *amp_mgr_lookup_by_state(u8 state) { struct amp_mgr *mgr; mutex_lock(&amp_mgr_list_lock); list_for_each_entry(mgr, &amp_mgr_list, list) { if (test_and_clear_bit(state, &mgr->state)) { amp_mgr_get(mgr); mutex_unlock(&amp_mgr_list_lock); return mgr; } } mutex_unlock(&amp_mgr_list_lock); return NULL; } /* hci_dev_list shall be locked */ static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl) { struct hci_dev *hdev; int i = 1; cl[0].id = AMP_ID_BREDR; cl[0].type = AMP_TYPE_BREDR; cl[0].status = AMP_STATUS_BLUETOOTH_ONLY; list_for_each_entry(hdev, &hci_dev_list, list) { if (hdev->dev_type == HCI_AMP) { cl[i].id = hdev->id; cl[i].type = hdev->amp_type; if (test_bit(HCI_UP, &hdev->flags)) cl[i].status = hdev->amp_status; else cl[i].status = AMP_STATUS_POWERED_DOWN; i++; } } } /* Processing A2MP messages */ static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_cmd_rej *rej = (void *) skb->data; if (le16_to_cpu(hdr->len) < sizeof(*rej)) return -EINVAL; BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason)); skb_pull(skb, sizeof(*rej)); return 0; } static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_discov_req *req = (void *) skb->data; u16 len = le16_to_cpu(hdr->len); struct a2mp_discov_rsp *rsp; u16 ext_feat; u8 num_ctrl; struct hci_dev *hdev; if (len < sizeof(*req)) return -EINVAL; skb_pull(skb, sizeof(*req)); ext_feat = le16_to_cpu(req->ext_feat); BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); /* check that packet is not broken for now */ while (ext_feat & A2MP_FEAT_EXT) { if (len < sizeof(ext_feat)) return -EINVAL; ext_feat = get_unaligned_le16(skb->data); BT_DBG("efm 0x%4.4x", ext_feat); len -= sizeof(ext_feat); skb_pull(skb, sizeof(ext_feat)); } read_lock(&hci_dev_list_lock); /* at minimum the BR/EDR needs to be listed */ num_ctrl = 1; list_for_each_entry(hdev, &hci_dev_list, list) { if (hdev->dev_type == HCI_AMP) num_ctrl++; } len = struct_size(rsp, cl, num_ctrl); rsp = kmalloc(len, GFP_ATOMIC); if (!rsp) { read_unlock(&hci_dev_list_lock); return -ENOMEM; } rsp->mtu = 
cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); rsp->ext_feat = 0; __a2mp_add_cl(mgr, rsp->cl); read_unlock(&hci_dev_list_lock); a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); kfree(rsp); return 0; } static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_discov_rsp *rsp = (void *) skb->data; u16 len = le16_to_cpu(hdr->len); struct a2mp_cl *cl; u16 ext_feat; bool found = false; if (len < sizeof(*rsp)) return -EINVAL; len -= sizeof(*rsp); skb_pull(skb, sizeof(*rsp)); ext_feat = le16_to_cpu(rsp->ext_feat); BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat); /* check that packet is not broken for now */ while (ext_feat & A2MP_FEAT_EXT) { if (len < sizeof(ext_feat)) return -EINVAL; ext_feat = get_unaligned_le16(skb->data); BT_DBG("efm 0x%4.4x", ext_feat); len -= sizeof(ext_feat); skb_pull(skb, sizeof(ext_feat)); } cl = (void *) skb->data; while (len >= sizeof(*cl)) { BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type, cl->status); if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) { struct a2mp_info_req req; found = true; memset(&req, 0, sizeof(req)); req.id = cl->id; a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), sizeof(req), &req); } len -= sizeof(*cl); cl = skb_pull(skb, sizeof(*cl)); } /* Fall back to L2CAP init sequence */ if (!found) { struct l2cap_conn *conn = mgr->l2cap_conn; struct l2cap_chan *chan; mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); if (chan->scid == L2CAP_CID_A2MP) continue; l2cap_chan_lock(chan); if (chan->state == BT_CONNECT) l2cap_send_conn_req(chan); l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } return 0; } static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_cl *cl = (void *) skb->data; while (skb->len >= sizeof(*cl)) { BT_DBG("Controller id %u type %u status %u", cl->id, cl->type, cl->status); cl = skb_pull(skb, sizeof(*cl)); } /* TODO send A2MP_CHANGE_RSP */ return 0; } static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) { BT_DBG("%s status 0x%2.2x", hdev->name, status); a2mp_send_getinfo_rsp(hdev); } static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_info_req *req = (void *) skb->data; struct hci_dev *hdev; struct hci_request hreq; int err = 0; if (le16_to_cpu(hdr->len) < sizeof(*req)) return -EINVAL; BT_DBG("id %u", req->id); hdev = hci_dev_get(req->id); if (!hdev || hdev->dev_type != HCI_AMP) { struct a2mp_info_rsp rsp; memset(&rsp, 0, sizeof(rsp)); rsp.id = req->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp); goto done; } set_bit(READ_LOC_AMP_INFO, &mgr->state); hci_req_init(&hreq, hdev); hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); err = hci_req_run(&hreq, read_local_amp_info_complete); if (err < 0) a2mp_send_getinfo_rsp(hdev); done: if (hdev) hci_dev_put(hdev); skb_pull(skb, sizeof(*req)); return 0; } static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data; struct a2mp_amp_assoc_req req; struct amp_ctrl *ctrl; if (le16_to_cpu(hdr->len) < sizeof(*rsp)) return -EINVAL; BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status); if (rsp->status) return -EINVAL; ctrl = amp_ctrl_add(mgr, rsp->id); if (!ctrl) return -ENOMEM; memset(&req, 0, sizeof(req)); req.id = 
rsp->id; a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), &req); skb_pull(skb, sizeof(*rsp)); return 0; } static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_amp_assoc_req *req = (void *) skb->data; struct hci_dev *hdev; struct amp_mgr *tmp; if (le16_to_cpu(hdr->len) < sizeof(*req)) return -EINVAL; BT_DBG("id %u", req->id); /* Make sure that other request is not processed */ tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); hdev = hci_dev_get(req->id); if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { struct a2mp_amp_assoc_rsp rsp; memset(&rsp, 0, sizeof(rsp)); rsp.id = req->id; if (tmp) { rsp.status = A2MP_STATUS_COLLISION_OCCURED; amp_mgr_put(tmp); } else { rsp.status = A2MP_STATUS_INVALID_CTRL_ID; } a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), &rsp); goto done; } amp_read_loc_assoc(hdev, mgr); done: if (hdev) hci_dev_put(hdev); skb_pull(skb, sizeof(*req)); return 0; } static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data; u16 len = le16_to_cpu(hdr->len); struct hci_dev *hdev; struct amp_ctrl *ctrl; struct hci_conn *hcon; size_t assoc_len; if (len < sizeof(*rsp)) return -EINVAL; assoc_len = len - sizeof(*rsp); BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status, assoc_len); if (rsp->status) return -EINVAL; /* Save remote ASSOC data */ ctrl = amp_ctrl_lookup(mgr, rsp->id); if (ctrl) { u8 *assoc; assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL); if (!assoc) { amp_ctrl_put(ctrl); return -ENOMEM; } ctrl->assoc = assoc; ctrl->assoc_len = assoc_len; ctrl->assoc_rem_len = assoc_len; ctrl->assoc_len_so_far = 0; amp_ctrl_put(ctrl); } /* Create Phys Link */ hdev = hci_dev_get(rsp->id); if (!hdev) return -EINVAL; hcon = phylink_add(hdev, mgr, rsp->id, true); if (!hcon) goto done; BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id); mgr->bredr_chan->remote_amp_id = rsp->id; amp_create_phylink(hdev, mgr, hcon); done: hci_dev_put(hdev); skb_pull(skb, len); return 0; } static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_physlink_req *req = (void *) skb->data; struct a2mp_physlink_rsp rsp; struct hci_dev *hdev; struct hci_conn *hcon; struct amp_ctrl *ctrl; if (le16_to_cpu(hdr->len) < sizeof(*req)) return -EINVAL; BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id); memset(&rsp, 0, sizeof(rsp)); rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; hdev = hci_dev_get(req->remote_id); if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) { rsp.status = A2MP_STATUS_INVALID_CTRL_ID; goto send_rsp; } ctrl = amp_ctrl_lookup(mgr, rsp.remote_id); if (!ctrl) { ctrl = amp_ctrl_add(mgr, rsp.remote_id); if (ctrl) { amp_ctrl_get(ctrl); } else { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; goto send_rsp; } } if (ctrl) { size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); u8 *assoc; assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); if (!assoc) { amp_ctrl_put(ctrl); hci_dev_put(hdev); return -ENOMEM; } ctrl->assoc = assoc; ctrl->assoc_len = assoc_len; ctrl->assoc_rem_len = assoc_len; ctrl->assoc_len_so_far = 0; amp_ctrl_put(ctrl); } hcon = phylink_add(hdev, mgr, req->local_id, false); if (hcon) { amp_accept_phylink(hdev, mgr, hcon); rsp.status = A2MP_STATUS_SUCCESS; } else { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; } send_rsp: if (hdev) hci_dev_put(hdev); /* Reply error now and 
success after HCI Write Remote AMP Assoc command complete with success status */ if (rsp.status != A2MP_STATUS_SUCCESS) { a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); } else { set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state); mgr->ident = hdr->ident; } skb_pull(skb, le16_to_cpu(hdr->len)); return 0; } static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_physlink_req *req = (void *) skb->data; struct a2mp_physlink_rsp rsp; struct hci_dev *hdev; struct hci_conn *hcon; if (le16_to_cpu(hdr->len) < sizeof(*req)) return -EINVAL; BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id); memset(&rsp, 0, sizeof(rsp)); rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; rsp.status = A2MP_STATUS_SUCCESS; hdev = hci_dev_get(req->remote_id); if (!hdev) { rsp.status = A2MP_STATUS_INVALID_CTRL_ID; goto send_rsp; } hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, &mgr->l2cap_conn->hcon->dst); if (!hcon) { bt_dev_err(hdev, "no phys link exist"); rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS; goto clean; } /* TODO Disconnect Phys Link here */ clean: hci_dev_put(hdev); send_rsp: a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); skb_pull(skb, sizeof(*req)); return 0; } static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code); skb_pull(skb, le16_to_cpu(hdr->len)); return 0; } /* Handle A2MP signalling */ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { struct a2mp_cmd *hdr; struct amp_mgr *mgr = chan->data; int err = 0; amp_mgr_get(mgr); while (skb->len >= sizeof(*hdr)) { u16 len; hdr = (void *) skb->data; len = le16_to_cpu(hdr->len); BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len); skb_pull(skb, sizeof(*hdr)); if (len > skb->len || !hdr->ident) { err = -EINVAL; break; } mgr->ident = hdr->ident; switch (hdr->code) { case A2MP_COMMAND_REJ: a2mp_command_rej(mgr, skb, hdr); break; case A2MP_DISCOVER_REQ: err = a2mp_discover_req(mgr, skb, hdr); break; case A2MP_CHANGE_NOTIFY: err = a2mp_change_notify(mgr, skb, hdr); break; case A2MP_GETINFO_REQ: err = a2mp_getinfo_req(mgr, skb, hdr); break; case A2MP_GETAMPASSOC_REQ: err = a2mp_getampassoc_req(mgr, skb, hdr); break; case A2MP_CREATEPHYSLINK_REQ: err = a2mp_createphyslink_req(mgr, skb, hdr); break; case A2MP_DISCONNPHYSLINK_REQ: err = a2mp_discphyslink_req(mgr, skb, hdr); break; case A2MP_DISCOVER_RSP: err = a2mp_discover_rsp(mgr, skb, hdr); break; case A2MP_GETINFO_RSP: err = a2mp_getinfo_rsp(mgr, skb, hdr); break; case A2MP_GETAMPASSOC_RSP: err = a2mp_getampassoc_rsp(mgr, skb, hdr); break; case A2MP_CHANGE_RSP: case A2MP_CREATEPHYSLINK_RSP: case A2MP_DISCONNPHYSLINK_RSP: err = a2mp_cmd_rsp(mgr, skb, hdr); break; default: BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); err = -EINVAL; break; } } if (err) { struct a2mp_cmd_rej rej; memset(&rej, 0, sizeof(rej)); rej.reason = cpu_to_le16(0); hdr = (void *) skb->data; BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), &rej); } /* Always free skb and return success error code to prevent from sending L2CAP Disconnect over A2MP channel */ kfree_skb(skb); amp_mgr_put(mgr); return 0; } static void a2mp_chan_close_cb(struct l2cap_chan *chan) { l2cap_chan_put(chan); } static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state, int err) { struct amp_mgr *mgr = chan->data; if (!mgr) 
return; BT_DBG("chan %p state %s", chan, state_to_string(state)); chan->state = state; switch (state) { case BT_CLOSED: if (mgr) amp_mgr_put(mgr); break; } } static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { struct sk_buff *skb; skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); return skb; } static const struct l2cap_ops a2mp_chan_ops = { .name = "L2CAP A2MP channel", .recv = a2mp_chan_recv_cb, .close = a2mp_chan_close_cb, .state_change = a2mp_chan_state_change_cb, .alloc_skb = a2mp_chan_alloc_skb_cb, /* Not implemented for A2MP */ .new_connection = l2cap_chan_no_new_connection, .teardown = l2cap_chan_no_teardown, .ready = l2cap_chan_no_ready, .defer = l2cap_chan_no_defer, .resume = l2cap_chan_no_resume, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, }; static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) { struct l2cap_chan *chan; int err; chan = l2cap_chan_create(); if (!chan) return NULL; BT_DBG("chan %p", chan); chan->chan_type = L2CAP_CHAN_FIXED; chan->scid = L2CAP_CID_A2MP; chan->dcid = L2CAP_CID_A2MP; chan->omtu = L2CAP_A2MP_DEFAULT_MTU; chan->imtu = L2CAP_A2MP_DEFAULT_MTU; chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; chan->ops = &a2mp_chan_ops; l2cap_chan_set_defaults(chan); chan->remote_max_tx = chan->max_tx; chan->remote_tx_win = chan->tx_win; chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; skb_queue_head_init(&chan->tx_q); chan->mode = L2CAP_MODE_ERTM; err = l2cap_ertm_init(chan); if (err < 0) { l2cap_chan_del(chan, 0); return NULL; } chan->conf_state = 0; if (locked) __l2cap_chan_add(conn, chan); else l2cap_chan_add(conn, chan); chan->remote_mps = chan->omtu; chan->mps = chan->omtu; chan->state = BT_CONNECTED; return chan; } /* AMP Manager functions */ struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr) { BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); kref_get(&mgr->kref); return mgr; } static void amp_mgr_destroy(struct kref *kref) { struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref); BT_DBG("mgr %p", mgr); mutex_lock(&amp_mgr_list_lock); list_del(&mgr->list); mutex_unlock(&amp_mgr_list_lock); amp_ctrl_list_flush(mgr); kfree(mgr); } int amp_mgr_put(struct amp_mgr *mgr) { BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); return kref_put(&mgr->kref, &amp_mgr_destroy); } static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked) { struct amp_mgr *mgr; struct l2cap_chan *chan; mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) return NULL; BT_DBG("conn %p mgr %p", conn, mgr); mgr->l2cap_conn = conn; chan = a2mp_chan_open(conn, locked); if (!chan) { kfree(mgr); return NULL; } mgr->a2mp_chan = chan; chan->data = mgr; conn->hcon->amp_mgr = mgr; kref_init(&mgr->kref); /* Remote AMP ctrl list initialization */ INIT_LIST_HEAD(&mgr->amp_ctrls); mutex_init(&mgr->amp_ctrls_lock); mutex_lock(&amp_mgr_list_lock); list_add(&mgr->list, &amp_mgr_list); mutex_unlock(&amp_mgr_list_lock); return mgr; } struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, struct sk_buff *skb) { struct amp_mgr *mgr; if (conn->hcon->type != ACL_LINK) return NULL; mgr = amp_mgr_create(conn, false); if (!mgr) { BT_ERR("Could not create AMP manager"); return NULL; } BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); return mgr->a2mp_chan; } void a2mp_send_getinfo_rsp(struct hci_dev *hdev) { struct amp_mgr *mgr; struct a2mp_info_rsp rsp; mgr = 
amp_mgr_lookup_by_state(READ_LOC_AMP_INFO); if (!mgr) return; BT_DBG("%s mgr %p", hdev->name, mgr); memset(&rsp, 0, sizeof(rsp)); rsp.id = hdev->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; if (hdev->amp_type != AMP_TYPE_BREDR) { rsp.status = 0; rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); } a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp); amp_mgr_put(mgr); } void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status) { struct amp_mgr *mgr; struct amp_assoc *loc_assoc = &hdev->loc_assoc; struct a2mp_amp_assoc_rsp *rsp; size_t len; mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); if (!mgr) return; BT_DBG("%s mgr %p", hdev->name, mgr); len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len; rsp = kzalloc(len, GFP_KERNEL); if (!rsp) { amp_mgr_put(mgr); return; } rsp->id = hdev->id; if (status) { rsp->status = A2MP_STATUS_INVALID_CTRL_ID; } else { rsp->status = A2MP_STATUS_SUCCESS; memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len); } a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp); amp_mgr_put(mgr); kfree(rsp); } void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status) { struct amp_mgr *mgr; struct amp_assoc *loc_assoc = &hdev->loc_assoc; struct a2mp_physlink_req *req; struct l2cap_chan *bredr_chan; size_t len; mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL); if (!mgr) return; len = sizeof(*req) + loc_assoc->len; BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len); req = kzalloc(len, GFP_KERNEL); if (!req) { amp_mgr_put(mgr); return; } bredr_chan = mgr->bredr_chan; if (!bredr_chan) goto clean; req->local_id = hdev->id; req->remote_id = bredr_chan->remote_amp_id; memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len); a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req); clean: amp_mgr_put(mgr); kfree(req); } void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) { struct amp_mgr *mgr; struct a2mp_physlink_rsp rsp; struct hci_conn *hs_hcon; mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC); if (!mgr) return; memset(&rsp, 0, sizeof(rsp)); hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); if (!hs_hcon) { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; } else { rsp.remote_id = hs_hcon->remote_id; rsp.status = A2MP_STATUS_SUCCESS; } BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon, status); rsp.local_id = hdev->id; a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp); amp_mgr_put(mgr); } void a2mp_discover_amp(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct amp_mgr *mgr = conn->hcon->amp_mgr; struct a2mp_discov_req req; BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr); if (!mgr) { mgr = amp_mgr_create(conn, true); if (!mgr) return; } mgr->bredr_chan = chan; memset(&req, 0, sizeof(req)); req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); req.ext_feat = 0; a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); }
linux-master
net/bluetooth/a2mp.c
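a2mp_send() and __a2mp_build() in a2mp.c above frame every A2MP PDU as a one-byte code, a one-byte identifier and a little-endian 16-bit payload length, followed by the payload. Below is a small host-side sketch of that framing (again not part of the kernel file, with the header re-declared locally); a discover request, for instance, would carry the 4-byte payload that a2mp_discover_amp() fills in, an MTU and an extended-feature mask, both little-endian.

/* Host-side sketch of the A2MP PDU framing used by __a2mp_build()/a2mp_send().
 * The struct layout mirrors struct a2mp_cmd above; the code and ident values
 * are caller-supplied here rather than hard-coded from the AMP specification.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct a2mp_pdu_hdr {
	uint8_t  code;   /* A2MP_* opcode                               */
	uint8_t  ident;  /* matches request/response pairs, never zero  */
	uint16_t len;    /* payload length, little-endian on the wire   */
} __attribute__((packed));

/* Build "header + payload" the same way __a2mp_build() does with kzalloc(). */
static uint8_t *a2mp_pdu_build(uint8_t code, uint8_t ident,
			       const void *payload, uint16_t plen,
			       size_t *out_len)
{
	size_t total = sizeof(struct a2mp_pdu_hdr) + plen;
	uint8_t *buf = calloc(1, total);
	struct a2mp_pdu_hdr *hdr = (struct a2mp_pdu_hdr *)buf;

	if (!buf)
		return NULL;

	hdr->code  = code;
	hdr->ident = ident;
	hdr->len   = plen;              /* assumes a little-endian host      */
	if (plen)
		memcpy(buf + sizeof(*hdr), payload, plen);

	*out_len = total;
	return buf;                     /* caller frees, as a2mp_send() does */
}

On the receive side, a2mp_chan_recv_cb() above walks the same layout in reverse: it pulls the 4-byte header, checks that the advertised length fits in the remaining skb, and rejects PDUs whose identifier is zero.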
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 Google Corporation */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "mgmt_util.h" #include "mgmt_config.h" #define HDEV_PARAM_U16(_param_name_) \ struct {\ struct mgmt_tlv entry; \ __le16 value; \ } __packed _param_name_ #define HDEV_PARAM_U8(_param_name_) \ struct {\ struct mgmt_tlv entry; \ __u8 value; \ } __packed _param_name_ #define TLV_SET_U16(_param_code_, _param_name_) \ { \ { cpu_to_le16(_param_code_), sizeof(__u16) }, \ cpu_to_le16(hdev->_param_name_) \ } #define TLV_SET_U8(_param_code_, _param_name_) \ { \ { cpu_to_le16(_param_code_), sizeof(__u8) }, \ hdev->_param_name_ \ } #define TLV_SET_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ { \ { cpu_to_le16(_param_code_), sizeof(__u16) }, \ cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) \ } int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { int ret; struct mgmt_rp_read_def_system_config { /* Please see mgmt-api.txt for documentation of these values */ HDEV_PARAM_U16(def_page_scan_type); HDEV_PARAM_U16(def_page_scan_int); HDEV_PARAM_U16(def_page_scan_window); HDEV_PARAM_U16(def_inq_scan_type); HDEV_PARAM_U16(def_inq_scan_int); HDEV_PARAM_U16(def_inq_scan_window); HDEV_PARAM_U16(def_br_lsto); HDEV_PARAM_U16(def_page_timeout); HDEV_PARAM_U16(sniff_min_interval); HDEV_PARAM_U16(sniff_max_interval); HDEV_PARAM_U16(le_adv_min_interval); HDEV_PARAM_U16(le_adv_max_interval); HDEV_PARAM_U16(def_multi_adv_rotation_duration); HDEV_PARAM_U16(le_scan_interval); HDEV_PARAM_U16(le_scan_window); HDEV_PARAM_U16(le_scan_int_suspend); HDEV_PARAM_U16(le_scan_window_suspend); HDEV_PARAM_U16(le_scan_int_discovery); HDEV_PARAM_U16(le_scan_window_discovery); HDEV_PARAM_U16(le_scan_int_adv_monitor); HDEV_PARAM_U16(le_scan_window_adv_monitor); HDEV_PARAM_U16(le_scan_int_connect); HDEV_PARAM_U16(le_scan_window_connect); HDEV_PARAM_U16(le_conn_min_interval); HDEV_PARAM_U16(le_conn_max_interval); HDEV_PARAM_U16(le_conn_latency); HDEV_PARAM_U16(le_supv_timeout); HDEV_PARAM_U16(def_le_autoconnect_timeout); HDEV_PARAM_U16(advmon_allowlist_duration); HDEV_PARAM_U16(advmon_no_filter_duration); HDEV_PARAM_U8(enable_advmon_interleave_scan); } __packed rp = { TLV_SET_U16(0x0000, def_page_scan_type), TLV_SET_U16(0x0001, def_page_scan_int), TLV_SET_U16(0x0002, def_page_scan_window), TLV_SET_U16(0x0003, def_inq_scan_type), TLV_SET_U16(0x0004, def_inq_scan_int), TLV_SET_U16(0x0005, def_inq_scan_window), TLV_SET_U16(0x0006, def_br_lsto), TLV_SET_U16(0x0007, def_page_timeout), TLV_SET_U16(0x0008, sniff_min_interval), TLV_SET_U16(0x0009, sniff_max_interval), TLV_SET_U16(0x000a, le_adv_min_interval), TLV_SET_U16(0x000b, le_adv_max_interval), TLV_SET_U16(0x000c, def_multi_adv_rotation_duration), TLV_SET_U16(0x000d, le_scan_interval), TLV_SET_U16(0x000e, le_scan_window), TLV_SET_U16(0x000f, le_scan_int_suspend), TLV_SET_U16(0x0010, le_scan_window_suspend), TLV_SET_U16(0x0011, le_scan_int_discovery), TLV_SET_U16(0x0012, le_scan_window_discovery), TLV_SET_U16(0x0013, le_scan_int_adv_monitor), TLV_SET_U16(0x0014, le_scan_window_adv_monitor), TLV_SET_U16(0x0015, le_scan_int_connect), TLV_SET_U16(0x0016, le_scan_window_connect), TLV_SET_U16(0x0017, le_conn_min_interval), TLV_SET_U16(0x0018, le_conn_max_interval), TLV_SET_U16(0x0019, le_conn_latency), TLV_SET_U16(0x001a, le_supv_timeout), TLV_SET_U16_JIFFIES_TO_MSECS(0x001b, def_le_autoconnect_timeout), TLV_SET_U16(0x001d, advmon_allowlist_duration), 
TLV_SET_U16(0x001e, advmon_no_filter_duration), TLV_SET_U8(0x001f, enable_advmon_interleave_scan), }; bt_dev_dbg(hdev, "sock %p", sk); ret = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_DEF_SYSTEM_CONFIG, 0, &rp, sizeof(rp)); return ret; } #define TO_TLV(x) ((struct mgmt_tlv *)(x)) #define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value))) #define TLV_GET_U8(tlv) (*((__u8 *)(TO_TLV(tlv)->value))) int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { u16 buffer_left = data_len; u8 *buffer = data; if (buffer_left < sizeof(struct mgmt_tlv)) { return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_STATUS_INVALID_PARAMS); } /* First pass to validate the tlv */ while (buffer_left >= sizeof(struct mgmt_tlv)) { const u8 len = TO_TLV(buffer)->length; size_t exp_type_len; const u16 exp_len = sizeof(struct mgmt_tlv) + len; const u16 type = le16_to_cpu(TO_TLV(buffer)->type); if (buffer_left < exp_len) { bt_dev_warn(hdev, "invalid len left %u, exp >= %u", buffer_left, exp_len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_STATUS_INVALID_PARAMS); } /* Please see mgmt-api.txt for documentation of these values */ switch (type) { case 0x0000: case 0x0001: case 0x0002: case 0x0003: case 0x0004: case 0x0005: case 0x0006: case 0x0007: case 0x0008: case 0x0009: case 0x000a: case 0x000b: case 0x000c: case 0x000d: case 0x000e: case 0x000f: case 0x0010: case 0x0011: case 0x0012: case 0x0013: case 0x0014: case 0x0015: case 0x0016: case 0x0017: case 0x0018: case 0x0019: case 0x001a: case 0x001b: case 0x001d: case 0x001e: exp_type_len = sizeof(u16); break; case 0x001f: exp_type_len = sizeof(u8); break; default: exp_type_len = 0; bt_dev_warn(hdev, "unsupported parameter %u", type); break; } if (exp_type_len && len != exp_type_len) { bt_dev_warn(hdev, "invalid length %d, exp %zu for type %u", len, exp_type_len, type); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_STATUS_INVALID_PARAMS); } buffer_left -= exp_len; buffer += exp_len; } buffer_left = data_len; buffer = data; while (buffer_left >= sizeof(struct mgmt_tlv)) { const u8 len = TO_TLV(buffer)->length; const u16 exp_len = sizeof(struct mgmt_tlv) + len; const u16 type = le16_to_cpu(TO_TLV(buffer)->type); switch (type) { case 0x0000: hdev->def_page_scan_type = TLV_GET_LE16(buffer); break; case 0x0001: hdev->def_page_scan_int = TLV_GET_LE16(buffer); break; case 0x0002: hdev->def_page_scan_window = TLV_GET_LE16(buffer); break; case 0x0003: hdev->def_inq_scan_type = TLV_GET_LE16(buffer); break; case 0x0004: hdev->def_inq_scan_int = TLV_GET_LE16(buffer); break; case 0x0005: hdev->def_inq_scan_window = TLV_GET_LE16(buffer); break; case 0x0006: hdev->def_br_lsto = TLV_GET_LE16(buffer); break; case 0x0007: hdev->def_page_timeout = TLV_GET_LE16(buffer); break; case 0x0008: hdev->sniff_min_interval = TLV_GET_LE16(buffer); break; case 0x0009: hdev->sniff_max_interval = TLV_GET_LE16(buffer); break; case 0x000a: hdev->le_adv_min_interval = TLV_GET_LE16(buffer); break; case 0x000b: hdev->le_adv_max_interval = TLV_GET_LE16(buffer); break; case 0x000c: hdev->def_multi_adv_rotation_duration = TLV_GET_LE16(buffer); break; case 0x000d: hdev->le_scan_interval = TLV_GET_LE16(buffer); break; case 0x000e: hdev->le_scan_window = TLV_GET_LE16(buffer); break; case 0x000f: hdev->le_scan_int_suspend = TLV_GET_LE16(buffer); break; case 0x0010: hdev->le_scan_window_suspend = TLV_GET_LE16(buffer); break; case 0x0011: hdev->le_scan_int_discovery = TLV_GET_LE16(buffer); 
break; case 0x0012: hdev->le_scan_window_discovery = TLV_GET_LE16(buffer); break; case 0x0013: hdev->le_scan_int_adv_monitor = TLV_GET_LE16(buffer); break; case 0x0014: hdev->le_scan_window_adv_monitor = TLV_GET_LE16(buffer); break; case 0x0015: hdev->le_scan_int_connect = TLV_GET_LE16(buffer); break; case 0x0016: hdev->le_scan_window_connect = TLV_GET_LE16(buffer); break; case 0x0017: hdev->le_conn_min_interval = TLV_GET_LE16(buffer); break; case 0x0018: hdev->le_conn_max_interval = TLV_GET_LE16(buffer); break; case 0x0019: hdev->le_conn_latency = TLV_GET_LE16(buffer); break; case 0x001a: hdev->le_supv_timeout = TLV_GET_LE16(buffer); break; case 0x001b: hdev->def_le_autoconnect_timeout = msecs_to_jiffies(TLV_GET_LE16(buffer)); break; case 0x001d: hdev->advmon_allowlist_duration = TLV_GET_LE16(buffer); break; case 0x001e: hdev->advmon_no_filter_duration = TLV_GET_LE16(buffer); break; case 0x001f: hdev->enable_advmon_interleave_scan = TLV_GET_U8(buffer); break; default: bt_dev_warn(hdev, "unsupported parameter %u", type); break; } buffer_left -= exp_len; buffer += exp_len; } return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, 0, NULL, 0); } int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { bt_dev_dbg(hdev, "sock %p", sk); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_DEF_RUNTIME_CONFIG, 0, NULL, 0); } int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { bt_dev_dbg(hdev, "sock %p", sk); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_STATUS_INVALID_PARAMS); }
linux-master
net/bluetooth/mgmt_config.c
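Illustrative sketch (not part of the kernel tree): the Set Default System Config handler above walks a packed stream of struct mgmt_tlv entries, each consisting of a little-endian 16-bit parameter type, a one-byte length, and the value bytes. The hypothetical helper below shows how a single u16 parameter would be laid out in such a buffer; encode_u16_param() is not a mgmt API, and the includes follow those used by mgmt_config.c itself.

#include <linux/string.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/mgmt.h>

/* Append one u16 parameter as a mgmt_tlv entry; returns bytes written. */
static size_t encode_u16_param(u8 *buf, u16 type, u16 val)
{
	struct mgmt_tlv *tlv = (struct mgmt_tlv *)buf;
	__le16 le_val = cpu_to_le16(val);

	tlv->type = cpu_to_le16(type);
	tlv->length = sizeof(__u16);
	memcpy(tlv->value, &le_val, sizeof(le_val));

	/* 3 header bytes plus 2 value bytes consumed */
	return sizeof(*tlv) + sizeof(le_val);
}

A caller would pass a concatenation of such entries as the command payload, which the two-pass parser above first validates and then applies field by field.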
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth kernel library. */ #define pr_fmt(fmt) "Bluetooth: " fmt #include <linux/export.h> #include <net/bluetooth/bluetooth.h> void baswap(bdaddr_t *dst, const bdaddr_t *src) { const unsigned char *s = (const unsigned char *)src; unsigned char *d = (unsigned char *)dst; unsigned int i; for (i = 0; i < 6; i++) d[i] = s[5 - i]; } EXPORT_SYMBOL(baswap); /* Bluetooth error codes to Unix errno mapping */ int bt_to_errno(__u16 code) { switch (code) { case 0: return 0; case 0x01: return EBADRQC; case 0x02: return ENOTCONN; case 0x03: return EIO; case 0x04: case 0x3c: return EHOSTDOWN; case 0x05: return EACCES; case 0x06: return EBADE; case 0x07: return ENOMEM; case 0x08: return ETIMEDOUT; case 0x09: return EMLINK; case 0x0a: return EMLINK; case 0x0b: return EALREADY; case 0x0c: return EBUSY; case 0x0d: case 0x0e: case 0x0f: return ECONNREFUSED; case 0x10: return ETIMEDOUT; case 0x11: case 0x27: case 0x29: case 0x20: return EOPNOTSUPP; case 0x12: return EINVAL; case 0x13: case 0x14: case 0x15: return ECONNRESET; case 0x16: return ECONNABORTED; case 0x17: return ELOOP; case 0x18: return EACCES; case 0x1a: return EPROTONOSUPPORT; case 0x1b: return ECONNREFUSED; case 0x19: case 0x1e: case 0x23: case 0x24: case 0x25: return EPROTO; default: return ENOSYS; } } EXPORT_SYMBOL(bt_to_errno); /* Unix errno to Bluetooth error codes mapping */ __u8 bt_status(int err) { /* Don't convert if already positive value */ if (err >= 0) return err; switch (err) { case -EBADRQC: return 0x01; case -ENOTCONN: return 0x02; case -EIO: return 0x03; case -EHOSTDOWN: return 0x04; case -EACCES: return 0x05; case -EBADE: return 0x06; case -ENOMEM: return 0x07; case -ETIMEDOUT: return 0x08; case -EMLINK: return 0x09; case -EALREADY: return 0x0b; case -EBUSY: return 0x0c; case -ECONNREFUSED: return 0x0d; case -EOPNOTSUPP: return 0x11; case -EINVAL: return 0x12; case -ECONNRESET: return 0x13; case -ECONNABORTED: return 0x16; case -ELOOP: return 0x17; case -EPROTONOSUPPORT: return 0x1a; case -EPROTO: return 0x19; default: return 0x1f; } } EXPORT_SYMBOL(bt_status); void bt_info(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_info("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_info); void bt_warn(const char *format, ...) 
{ struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn); void bt_err(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err); #ifdef CONFIG_BT_FEATURE_DEBUG static bool debug_enable; void bt_dbg_set(bool enable) { debug_enable = enable; } bool bt_dbg_get(void) { return debug_enable; } void bt_dbg(const char *format, ...) { struct va_format vaf; va_list args; if (likely(!debug_enable)) return; va_start(args, format); vaf.fmt = format; vaf.va = &args; printk(KERN_DEBUG pr_fmt("%pV"), &vaf); va_end(args); } EXPORT_SYMBOL(bt_dbg); #endif void bt_warn_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn_ratelimited); void bt_err_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err_ratelimited);
linux-master
net/bluetooth/lib.c
/* * ECDH helper functions - KPP wrappings * * Copyright (C) 2017 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS * SOFTWARE IS DISCLAIMED. */ #include "ecdh_helper.h" #include <linux/scatterlist.h> #include <crypto/ecdh.h> static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) { int i; for (i = 0; i < ndigits; i++) out[i] = __swab64(in[ndigits - 1 - i]); } /* compute_ecdh_secret() - function assumes that the private key was * already set. * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). * @public_key: pair's ecc public key. * secret: memory where the ecdh computed shared secret will be saved. * * Return: zero on success; error code in case of error. */ int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], u8 secret[32]) { DECLARE_CRYPTO_WAIT(result); struct kpp_request *req; u8 *tmp; struct scatterlist src, dst; int err; tmp = kmalloc(64, GFP_KERNEL); if (!tmp) return -ENOMEM; req = kpp_request_alloc(tfm, GFP_KERNEL); if (!req) { err = -ENOMEM; goto free_tmp; } swap_digits((u64 *)public_key, (u64 *)tmp, 4); /* x */ swap_digits((u64 *)&public_key[32], (u64 *)&tmp[32], 4); /* y */ sg_init_one(&src, tmp, 64); sg_init_one(&dst, secret, 32); kpp_request_set_input(req, &src, 64); kpp_request_set_output(req, &dst, 32); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &result); err = crypto_kpp_compute_shared_secret(req); err = crypto_wait_req(err, &result); if (err < 0) { pr_err("alg: ecdh: compute shared secret failed. err %d\n", err); goto free_all; } swap_digits((u64 *)secret, (u64 *)tmp, 4); memcpy(secret, tmp, 32); free_all: kpp_request_free(req); free_tmp: kfree_sensitive(tmp); return err; } /* set_ecdh_privkey() - set or generate ecc private key. * * Function generates an ecc private key in the crypto subsystem when receiving * a NULL private key or sets the received key when not NULL. * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). * @private_key: user's ecc private key. When not NULL, the key is expected * in little endian format. * * Return: zero on success; error code in case of error. 
*/ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]) { u8 *buf, *tmp = NULL; unsigned int buf_len; int err; struct ecdh p = {0}; if (private_key) { tmp = kmalloc(32, GFP_KERNEL); if (!tmp) return -ENOMEM; swap_digits((u64 *)private_key, (u64 *)tmp, 4); p.key = tmp; p.key_size = 32; } buf_len = crypto_ecdh_key_len(&p); buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto free_tmp; } err = crypto_ecdh_encode_key(buf, buf_len, &p); if (err) goto free_all; err = crypto_kpp_set_secret(tfm, buf, buf_len); /* fall through */ free_all: kfree_sensitive(buf); free_tmp: kfree_sensitive(tmp); return err; } /* generate_ecdh_public_key() - function assumes that the private key was * already set. * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). * @public_key: memory where the computed ecc public key will be saved. * * Return: zero on success; error code in case of error. */ int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]) { DECLARE_CRYPTO_WAIT(result); struct kpp_request *req; u8 *tmp; struct scatterlist dst; int err; tmp = kmalloc(64, GFP_KERNEL); if (!tmp) return -ENOMEM; req = kpp_request_alloc(tfm, GFP_KERNEL); if (!req) { err = -ENOMEM; goto free_tmp; } sg_init_one(&dst, tmp, 64); kpp_request_set_input(req, NULL, 0); kpp_request_set_output(req, &dst, 64); kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &result); err = crypto_kpp_generate_public_key(req); err = crypto_wait_req(err, &result); if (err < 0) goto free_all; /* The public key is handed back in little endian as expected by * the Security Manager Protocol. */ swap_digits((u64 *)tmp, (u64 *)public_key, 4); /* x */ swap_digits((u64 *)&tmp[32], (u64 *)&public_key[32], 4); /* y */ free_all: kpp_request_free(req); free_tmp: kfree(tmp); return err; } /* generate_ecdh_keys() - generate ecc key pair. * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). * @public_key: memory where the computed ecc public key will be saved. * * Return: zero on success; error code in case of error. */ int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]) { int err; err = set_ecdh_privkey(tfm, NULL); if (err) return err; return generate_ecdh_public_key(tfm, public_key); }
linux-master
net/bluetooth/ecdh_helper.c
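Illustrative sketch (not part of the kernel tree): the ECDH helpers above are composed against a single KPP transform, typically by the SMP code. The hypothetical caller ecdh_derive_dhkey() below generates a fresh key pair and derives the shared secret from a peer's public key; the "ecdh-nist-p256" algorithm name is an assumption (older kernels allocated plain "ecdh"), and error handling is abbreviated.

#include <linux/err.h>
#include <linux/types.h>
#include <crypto/kpp.h>
#include "ecdh_helper.h"

static int ecdh_derive_dhkey(const u8 remote_pk[64], u8 local_pk[64],
			     u8 dhkey[32])
{
	struct crypto_kpp *tfm;
	int err;

	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Generate a private key inside the crypto layer and export the
	 * matching public key in little-endian form, as SMP expects.
	 */
	err = generate_ecdh_keys(tfm, local_pk);
	if (!err)
		/* Combine our private key with the peer's public key. */
		err = compute_ecdh_secret(tfm, remote_pk, dhkey);

	crypto_free_kpp(tfm);
	return err;
}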
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI core. */ #include <linux/export.h> #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> #include <linux/kcov.h> #include <linux/property.h> #include <linux/suspend.h> #include <linux/wait.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "hci_debugfs.h" #include "smp.h" #include "leds.h" #include "msft.h" #include "aosp.h" #include "hci_codec.h" static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); static void hci_tx_work(struct work_struct *work); /* HCI device list */ LIST_HEAD(hci_dev_list); DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); static int hci_scan_req(struct hci_request *req, unsigned long opt) { __u8 scan = opt; BT_DBG("%s %x", req->hdev->name, scan); /* Inquiry and Page scans */ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); return 0; } static int hci_auth_req(struct hci_request *req, unsigned long opt) { __u8 auth = opt; BT_DBG("%s %x", req->hdev->name, auth); /* Authentication */ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); return 0; } static int hci_encrypt_req(struct hci_request *req, unsigned long opt) { __u8 encrypt = opt; BT_DBG("%s %x", req->hdev->name, encrypt); /* Encryption */ hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); return 0; } static int hci_linkpol_req(struct hci_request *req, unsigned long opt) { __le16 policy = cpu_to_le16(opt); BT_DBG("%s %x", req->hdev->name, policy); /* Default link policy */ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); return 0; } /* Get HCI device by index. * Device is held on return. 
*/ struct hci_dev *hci_dev_get(int index) { struct hci_dev *hdev = NULL, *d; BT_DBG("%d", index); if (index < 0) return NULL; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); break; } } read_unlock(&hci_dev_list_lock); return hdev; } /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) { struct discovery_state *discov = &hdev->discovery; switch (discov->state) { case DISCOVERY_FINDING: case DISCOVERY_RESOLVING: return true; default: return false; } } void hci_discovery_set_state(struct hci_dev *hdev, int state) { int old_state = hdev->discovery.state; BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); if (old_state == state) return; hdev->discovery.state = state; switch (state) { case DISCOVERY_STOPPED: hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); break; case DISCOVERY_STARTING: break; case DISCOVERY_FINDING: mgmt_discovering(hdev, 1); break; case DISCOVERY_RESOLVING: break; case DISCOVERY_STOPPING: break; } } void hci_inquiry_cache_flush(struct hci_dev *hdev) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *p, *n; list_for_each_entry_safe(p, n, &cache->all, all) { list_del(&p->all); kfree(p); } INIT_LIST_HEAD(&cache->unknown); INIT_LIST_HEAD(&cache->resolve); } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->all, all) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->unknown, list) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); list_for_each_entry(e, &cache->resolve, list) { if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) return e; if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie) { struct discovery_state *cache = &hdev->discovery; struct list_head *pos = &cache->resolve; struct inquiry_entry *p; list_del(&ie->list); list_for_each_entry(p, &cache->resolve, list) { if (p->name_state != NAME_PENDING && abs(p->data.rssi) >= abs(ie->data.rssi)) break; pos = &p->list; } list_add(&ie->list, pos); } u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *ie; u32 flags = 0; BT_DBG("cache %p, %pMR", cache, &data->bdaddr); hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); if (!data->ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); if (ie) { if (!ie->data.ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; if (ie->name_state == NAME_NEEDED && data->rssi != ie->data.rssi) { ie->data.rssi = data->rssi; hci_inquiry_cache_update_resolve(hdev, ie); } goto update; } /* Entry not in the cache. Add new one. 
*/ ie = kzalloc(sizeof(*ie), GFP_KERNEL); if (!ie) { flags |= MGMT_DEV_FOUND_CONFIRM_NAME; goto done; } list_add(&ie->all, &cache->all); if (name_known) { ie->name_state = NAME_KNOWN; } else { ie->name_state = NAME_NOT_KNOWN; list_add(&ie->list, &cache->unknown); } update: if (name_known && ie->name_state != NAME_KNOWN && ie->name_state != NAME_PENDING) { ie->name_state = NAME_KNOWN; list_del(&ie->list); } memcpy(&ie->data, data, sizeof(*data)); ie->timestamp = jiffies; cache->timestamp = jiffies; if (ie->name_state == NAME_NOT_KNOWN) flags |= MGMT_DEV_FOUND_CONFIRM_NAME; done: return flags; } static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) { struct discovery_state *cache = &hdev->discovery; struct inquiry_info *info = (struct inquiry_info *) buf; struct inquiry_entry *e; int copied = 0; list_for_each_entry(e, &cache->all, all) { struct inquiry_data *data = &e->data; if (copied >= num) break; bacpy(&info->bdaddr, &data->bdaddr); info->pscan_rep_mode = data->pscan_rep_mode; info->pscan_period_mode = data->pscan_period_mode; info->pscan_mode = data->pscan_mode; memcpy(info->dev_class, data->dev_class, 3); info->clock_offset = data->clock_offset; info++; copied++; } BT_DBG("cache %p, copied %d", cache, copied); return copied; } static int hci_inq_req(struct hci_request *req, unsigned long opt) { struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; struct hci_dev *hdev = req->hdev; struct hci_cp_inquiry cp; BT_DBG("%s", hdev->name); if (test_bit(HCI_INQUIRY, &hdev->flags)) return 0; /* Start Inquiry */ memcpy(&cp.lap, &ir->lap, 3); cp.length = ir->length; cp.num_rsp = ir->num_rsp; hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); return 0; } int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) return -EFAULT; hdev = hci_dev_get(ir.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (hdev->dev_type != HCI_PRIMARY) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } /* Restrict maximum inquiry length to 60 seconds */ if (ir.length > 60) { err = -EINVAL; goto done; } hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { hci_inquiry_cache_flush(hdev); do_inquiry = 1; } hci_dev_unlock(hdev); timeo = ir.length * msecs_to_jiffies(2000); if (do_inquiry) { err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, timeo, NULL); if (err < 0) goto done; /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is * cleared). If it is interrupted by a signal, return -EINTR. */ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE)) { err = -EINTR; goto done; } } /* for unlimited number of responses we will use buffer with * 255 entries */ max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; /* cache_dump can't sleep. Therefore we allocate temp buffer and then * copy it to the user space. 
*/ buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); if (!buf) { err = -ENOMEM; goto done; } hci_dev_lock(hdev); ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); hci_dev_unlock(hdev); BT_DBG("num_rsp %d", ir.num_rsp); if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; kfree(buf); done: hci_dev_put(hdev); return err; } static int hci_dev_do_open(struct hci_dev *hdev) { int ret = 0; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); ret = hci_dev_open_sync(hdev); hci_req_sync_unlock(hdev); return ret; } /* ---- HCI ioctl helpers ---- */ int hci_dev_open(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; /* Devices that are marked as unconfigured can only be powered * up as user channel. Trying to bring them up as normal devices * will result into a failure. Only user channel operation is * possible. * * When this function is called for a user channel, the flag * HCI_USER_CHANNEL will be set first before attempting to * open the device. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EOPNOTSUPP; goto done; } /* We need to ensure that no other power on/off work is pending * before proceeding to call hci_dev_do_open. This is * particularly important if the setup procedure has not yet * completed. */ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); /* After this call it is guaranteed that the setup procedure * has finished. This means that error conditions like RFKILL * or no valid public or static random address apply. */ flush_workqueue(hdev->req_workqueue); /* For controllers not using the management interface and that * are brought up using legacy ioctl, set the HCI_BONDABLE bit * so that pairing works for them. Once the management interface * is in use this bit will be cleared again and userspace has * to explicitly enable it. */ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !hci_dev_test_flag(hdev, HCI_MGMT)) hci_dev_set_flag(hdev, HCI_BONDABLE); err = hci_dev_do_open(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_do_close(struct hci_dev *hdev) { int err; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); return err; } int hci_dev_close(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } cancel_work_sync(&hdev->power_on); if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); err = hci_dev_do_close(hdev); done: hci_dev_put(hdev); return err; } static int hci_dev_do_reset(struct hci_dev *hdev) { int ret; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); /* Cancel these to avoid queueing non-chained pending work */ hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); /* Wait for * * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) * queue_delayed_work(&hdev->{cmd,ncmd}_timer) * * inside RCU section to see the flag or complete scheduling. */ synchronize_rcu(); /* Explicitly cancel works in case scheduled after setting the flag. 
*/ cancel_delayed_work(&hdev->cmd_timer); cancel_delayed_work(&hdev->ncmd_timer); /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ drain_workqueue(hdev->workqueue); hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock(hdev); if (hdev->flush) hdev->flush(hdev); hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; hdev->iso_cnt = 0; ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; } int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto done; } if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } err = hci_dev_do_reset(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_reset_stat(__u16 dev) { struct hci_dev *hdev; int ret = 0; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { ret = -EOPNOTSUPP; goto done; } memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); done: hci_dev_put(hdev); return ret; } static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; BT_DBG("%s scan 0x%02x", hdev->name, scan); if ((scan & SCAN_PAGE)) conn_changed = !hci_dev_test_and_set_flag(hdev, HCI_CONNECTABLE); else conn_changed = hci_dev_test_and_clear_flag(hdev, HCI_CONNECTABLE); if ((scan & SCAN_INQUIRY)) { discov_changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE); } else { hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); discov_changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE); } if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; if (conn_changed || discov_changed) { /* In case this was disabled through mgmt */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) hci_update_adv_data(hdev, hdev->cur_adv_instance); mgmt_new_settings(hdev); } } int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) return -EFAULT; hdev = hci_dev_get(dr.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (hdev->dev_type != HCI_PRIMARY) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } switch (cmd) { case HCISETAUTH: err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETENCRYPT: if (!lmp_encrypt_capable(hdev)) { err = -EOPNOTSUPP; break; } if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); if (err) break; } err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETSCAN: err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
*/ if (!err) hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETLINKMODE: hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT); break; case HCISETPTYPE: if (hdev->pkt_type == (__u16) dr.dev_opt) break; hdev->pkt_type = (__u16) dr.dev_opt; mgmt_phy_configuration_changed(hdev, NULL); break; case HCISETACLMTU: hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); break; case HCISETSCOMTU: hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); break; default: err = -EINVAL; break; } done: hci_dev_put(hdev); return err; } int hci_get_dev_list(void __user *arg) { struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; int n = 0, size, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; size = sizeof(*dl) + dev_num * sizeof(*dr); dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; dr = dl->dev_req; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { unsigned long flags = hdev->flags; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. */ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); (dr + n)->dev_id = hdev->id; (dr + n)->dev_opt = flags; if (++n >= dev_num) break; } read_unlock(&hci_dev_list_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*dr); err = copy_to_user(arg, dl, size); kfree(dl); return err ? -EFAULT : 0; } int hci_get_dev_info(void __user *arg) { struct hci_dev *hdev; struct hci_dev_info di; unsigned long flags; int err = 0; if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; hdev = hci_dev_get(di.dev_id); if (!hdev) return -ENODEV; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. 
*/ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags = hdev->flags & ~BIT(HCI_UP); else flags = hdev->flags; strcpy(di.name, hdev->name); di.bdaddr = hdev->bdaddr; di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { di.acl_mtu = hdev->acl_mtu; di.acl_pkts = hdev->acl_pkts; di.sco_mtu = hdev->sco_mtu; di.sco_pkts = hdev->sco_pkts; } else { di.acl_mtu = hdev->le_mtu; di.acl_pkts = hdev->le_pkts; di.sco_mtu = 0; di.sco_pkts = 0; } di.link_policy = hdev->link_policy; di.link_mode = hdev->link_mode; memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); memcpy(&di.features, &hdev->features, sizeof(di.features)); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; hci_dev_put(hdev); return err; } /* ---- Interface to HCI drivers ---- */ static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (blocked) { hci_dev_set_flag(hdev, HCI_RFKILLED); if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) hci_dev_do_close(hdev); } else { hci_dev_clear_flag(hdev, HCI_RFKILLED); } return 0; } static const struct rfkill_ops hci_rfkill_ops = { .set_block = hci_rfkill_set_block, }; static void hci_power_on(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); int err; BT_DBG("%s", hdev->name); if (test_bit(HCI_UP, &hdev->flags) && hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } err = hci_dev_do_open(hdev); if (err < 0) { hci_dev_lock(hdev); mgmt_set_powered_failed(hdev, err); hci_dev_unlock(hdev); return; } /* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to turn the device back off. */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (hdev->dev_type == HCI_PRIMARY && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send Unconfigued Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be send. */ mgmt_index_added(hdev); } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* When the controller is now configured, then it * is important to clear the HCI_RAW flag. */ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event. 
*/ mgmt_index_added(hdev); } } static void hci_power_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_off.work); BT_DBG("%s", hdev->name); hci_dev_do_close(hdev); } static void hci_error_reset(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); BT_DBG("%s", hdev->name); if (hdev->hw_error) hdev->hw_error(hdev, hdev->hw_error_code); else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); if (hci_dev_do_close(hdev)) return; hci_dev_do_open(hdev); } void hci_uuids_clear(struct hci_dev *hdev) { struct bt_uuid *uuid, *tmp; list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { list_del(&uuid->list); kfree(uuid); } } void hci_link_keys_clear(struct hci_dev *hdev) { struct link_key *key, *tmp; list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } } void hci_smp_ltks_clear(struct hci_dev *hdev) { struct smp_ltk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_smp_irks_clear(struct hci_dev *hdev) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_blocked_keys_clear(struct hci_dev *hdev) { struct blocked_key *b, *tmp; list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { list_del_rcu(&b->list); kfree_rcu(b, rcu); } } bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) { bool blocked = false; struct blocked_key *b; rcu_read_lock(); list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { blocked = true; break; } } rcu_read_unlock(); return blocked; } struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->link_keys, list) { if (bacmp(bdaddr, &k->bdaddr) == 0) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LINKKEY, k->val)) { bt_dev_warn_ratelimited(hdev, "Link key blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) return true; /* Debug keys are insecure so don't store them persistently */ if (key_type == HCI_LK_DEBUG_COMBINATION) return false; /* Changed combination key and there's no previous one */ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) return false; /* Security mode 3 case */ if (!conn) return true; /* BR/EDR key derived using SC from an LE link */ if (conn->type == LE_LINK) return true; /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) return true; /* Local side had dedicated bonding as requirement */ if (conn->auth_type == 0x02 || conn->auth_type == 0x03) return true; /* Remote side had dedicated bonding as requirement */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) return true; /* If none of the above criteria match, then don't store the key * persistently */ return false; } static u8 ltk_role(u8 type) { if (type == SMP_LTK) return HCI_ROLE_MASTER; return HCI_ROLE_SLAVE; } struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role) { struct smp_ltk *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, 
list) { if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) continue; if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, k->val)) { bt_dev_warn_ratelimited(hdev, "LTK blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (!bacmp(&irk->rpa, rpa)) { irk_to_return = irk; goto done; } } list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; /* Identity Address must be public or static random */ if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) return NULL; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (addr_type == irk->addr_type && bacmp(bdaddr, &irk->bdaddr) == 0) { irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent) { struct link_key *key, *old_key; u8 old_key_type; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { old_key_type = old_key->type; key = old_key; } else { old_key_type = conn ? 
conn->key_type : 0xff; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->link_keys); } BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); /* Some buggy controller combinations generate a changed * combination key for legacy pairing even when there's no * previous key */ if (type == HCI_LK_CHANGED_COMBINATION && (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { type = HCI_LK_COMBINATION; if (conn) conn->key_type = type; } bacpy(&key->bdaddr, bdaddr); memcpy(key->val, val, HCI_LINK_KEY_SIZE); key->pin_len = pin_len; if (type == HCI_LK_CHANGED_COMBINATION) key->type = old_key_type; else key->type = type; if (persistent) *persistent = hci_persistent_key(hdev, conn, type, old_key_type); return key; } struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) { struct smp_ltk *key, *old_key; u8 role = ltk_role(type); old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); if (old_key) key = old_key; else { key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->long_term_keys); } bacpy(&key->bdaddr, bdaddr); key->bdaddr_type = addr_type; memcpy(key->val, tk, sizeof(key->val)); key->authenticated = authenticated; key->ediv = ediv; key->rand = rand; key->enc_size = enc_size; key->type = type; return key; } struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); if (!irk) { irk = kzalloc(sizeof(*irk), GFP_KERNEL); if (!irk) return NULL; bacpy(&irk->bdaddr, bdaddr); irk->addr_type = addr_type; list_add_rcu(&irk->list, &hdev->identity_resolving_keys); } memcpy(irk->val, val, 16); bacpy(&irk->rpa, rpa); return irk; } int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *key; key = hci_find_link_key(hdev, bdaddr); if (!key) return -ENOENT; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&key->list); kfree_rcu(key, rcu); return 0; } int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct smp_ltk *k, *tmp; int removed = 0; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); removed++; } return removed ? 
0 : -ENOENT; } void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); } } bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { struct smp_ltk *k; struct smp_irk *irk; u8 addr_type; if (type == BDADDR_BREDR) { if (hci_find_link_key(hdev, bdaddr)) return true; return false; } /* Convert to HCI addr type which struct smp_ltk uses */ if (type == BDADDR_LE_PUBLIC) addr_type = ADDR_LE_DEV_PUBLIC; else addr_type = ADDR_LE_DEV_RANDOM; irk = hci_get_irk(hdev, bdaddr, addr_type); if (irk) { bdaddr = &irk->bdaddr; addr_type = irk->addr_type; } rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /* HCI command timer function */ static void hci_cmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_timer.work); if (hdev->sent_cmd) { struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; u16 opcode = __le16_to_cpu(sent->opcode); bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); } else { bt_dev_err(hdev, "command tx timeout"); } if (hdev->cmd_timeout) hdev->cmd_timeout(hdev); atomic_set(&hdev->cmd_cnt, 1); queue_work(hdev->workqueue, &hdev->cmd_work); } /* HCI ncmd timer function */ static void hci_ncmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, ncmd_timer.work); bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); /* During HCI_INIT phase no events can be injected if the ncmd timer * triggers since the procedure has its own timeout handling. 
*/ if (test_bit(HCI_INIT, &hdev->flags)) return; /* This is an irrecoverable state, inject hardware error event */ hci_reset_dev(hdev); } struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; list_for_each_entry(data, &hdev->remote_oob_data, list) { if (bacmp(bdaddr, &data->bdaddr) != 0) continue; if (data->bdaddr_type != bdaddr_type) continue; return data; } return NULL; } int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) return -ENOENT; BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); list_del(&data->list); kfree(data); return 0; } void hci_remote_oob_data_clear(struct hci_dev *hdev) { struct oob_data *data, *n; list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { list_del(&data->list); kfree(data); } } int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) { data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bacpy(&data->bdaddr, bdaddr); data->bdaddr_type = bdaddr_type; list_add(&data->list, &hdev->remote_oob_data); } if (hash192 && rand192) { memcpy(data->hash192, hash192, sizeof(data->hash192)); memcpy(data->rand192, rand192, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x03; } else { memset(data->hash192, 0, sizeof(data->hash192)); memset(data->rand192, 0, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x02; else data->present = 0x00; } if (hash256 && rand256) { memcpy(data->hash256, hash256, sizeof(data->hash256)); memcpy(data->rand256, rand256, sizeof(data->rand256)); } else { memset(data->hash256, 0, sizeof(data->hash256)); memset(data->rand256, 0, sizeof(data->rand256)); if (hash192 && rand192) data->present = 0x01; } BT_DBG("%s for %pMR", hdev->name, bdaddr); return 0; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; list_for_each_entry(adv_instance, &hdev->adv_instances, list) { if (adv_instance->instance == instance) return adv_instance; } return NULL; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *cur_instance; cur_instance = hci_find_adv_instance(hdev, instance); if (!cur_instance) return NULL; if (cur_instance == list_last_entry(&hdev->adv_instances, struct adv_info, list)) return list_first_entry(&hdev->adv_instances, struct adv_info, list); else return list_next_entry(cur_instance, list); } /* This function requires the caller holds hdev->lock */ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT; BT_DBG("%s removing %dMR", hdev->name, instance); if (hdev->cur_adv_instance == instance) { if (hdev->adv_instance_timeout) { cancel_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } hdev->cur_adv_instance = 0x00; } cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); hdev->adv_instance_cnt--; return 0; } void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) { struct 
adv_info *adv_instance, *n; list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) adv_instance->rpa_expired = rpa_expired; } /* This function requires the caller holds hdev->lock */ void hci_adv_instances_clear(struct hci_dev *hdev) { struct adv_info *adv_instance, *n; if (hdev->adv_instance_timeout) { cancel_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); } hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; } static void adv_instance_rpa_expired(struct work_struct *work) { struct adv_info *adv_instance = container_of(work, struct adv_info, rpa_expired_cb.work); BT_DBG(""); adv_instance->rpa_expired = true; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); if (adv) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); } else { if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || instance < 1 || instance > hdev->le_num_of_adv_sets + 1) return ERR_PTR(-EOVERFLOW); adv = kzalloc(sizeof(*adv), GFP_KERNEL); if (!adv) return ERR_PTR(-ENOMEM); adv->pending = true; adv->instance = instance; list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } adv->flags = flags; adv->min_interval = min_interval; adv->max_interval = max_interval; adv->tx_power = tx_power; /* Defining a mesh_handle changes the timing units to ms, * rather than seconds, and ties the instance to the requested * mesh_tx queue. 
*/ adv->mesh = mesh_handle; hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, scan_rsp_len, scan_rsp_data); adv->timeout = timeout; adv->remaining_time = timeout; if (duration == 0) adv->duration = hdev->def_multi_adv_rotation_duration; else adv->duration = duration; INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); return adv; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval) { struct adv_info *adv; adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, min_interval, max_interval, 0); if (IS_ERR(adv)) return adv; adv->periodic = true; adv->per_adv_data_len = data_len; if (data) memcpy(adv->per_adv_data, data, data_len); return adv; } /* This function requires the caller holds hdev->lock */ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); /* If advertisement doesn't exist, we can't modify its data */ if (!adv) return -ENOENT; if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memcpy(adv->adv_data, adv_data, adv_data_len); adv->adv_data_len = adv_data_len; adv->adv_data_changed = true; } if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); adv->scan_rsp_len = scan_rsp_len; adv->scan_rsp_changed = true; } /* Mark as changed if there are flags which would affect it */ if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) adv->scan_rsp_changed = true; return 0; } /* This function requires the caller holds hdev->lock */ u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) { u32 flags; struct adv_info *adv; if (instance == 0x00) { /* Instance 0 always manages the "Tx Power" and "Flags" * fields */ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting * corresponds to the "connectable" instance flag. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) flags |= MGMT_ADV_FLAG_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_DISCOV; return flags; } adv = hci_find_adv_instance(hdev, instance); /* Return 0 when we got an invalid instance identifier. */ if (!adv) return 0; return adv->flags; } bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) { struct adv_info *adv; /* Instance 0x00 always set local name */ if (instance == 0x00) return true; adv = hci_find_adv_instance(hdev, instance); if (!adv) return false; if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) return true; return adv->scan_rsp_len ? true : false; } /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { struct adv_monitor *monitor; int handle; idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) hci_free_adv_monitor(hdev, monitor); idr_destroy(&hdev->adv_monitors_idr); } /* Frees the monitor structure and do some bookkeepings. 
* This function requires the caller holds hdev->lock. */ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { struct adv_pattern *pattern; struct adv_pattern *tmp; if (!monitor) return; list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { list_del(&pattern->list); kfree(pattern); } if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { hdev->adv_monitors_cnt--; mgmt_adv_monitor_removed(hdev, monitor->handle); } kfree(monitor); } /* Assigns handle to a monitor, and if offloading is supported and power is on, * also attempts to forward the request to the controller. * This function requires the caller holds hci_req_sync_lock. */ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int min, max, handle; int status = 0; if (!monitor) return -EINVAL; hci_dev_lock(hdev); min = HCI_MIN_ADV_MONITOR_HANDLE; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, GFP_KERNEL); hci_dev_unlock(hdev); if (handle < 0) return handle; monitor->handle = handle; if (!hdev_is_powered(hdev)) return status; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: bt_dev_dbg(hdev, "add monitor %d status %d", monitor->handle, status); /* Message was not forwarded to controller - not an error */ break; case HCI_ADV_MONITOR_EXT_MSFT: status = msft_add_monitor_pattern(hdev, monitor); bt_dev_dbg(hdev, "add monitor %d msft status %d", handle, status); break; } return status; } /* Attempts to tell the controller and free the monitor. If somehow the * controller doesn't have a corresponding handle, remove anyway. * This function requires the caller holds hci_req_sync_lock. 
*/ static int hci_remove_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int status = 0; int handle; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ bt_dev_dbg(hdev, "remove monitor %d status %d", monitor->handle, status); goto free_monitor; case HCI_ADV_MONITOR_EXT_MSFT: handle = monitor->handle; status = msft_remove_monitor(hdev, monitor); bt_dev_dbg(hdev, "remove monitor %d msft status %d", handle, status); break; } /* In case no matching handle registered, just free the monitor */ if (status == -ENOENT) goto free_monitor; return status; free_monitor: if (status == -ENOENT) bt_dev_warn(hdev, "Removing monitor with no matching handle %d", monitor->handle); hci_free_adv_monitor(hdev, monitor); return status; } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) { struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); if (!monitor) return -EINVAL; return hci_remove_adv_monitor(hdev, monitor); } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_all_adv_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; int idr_next_id = 0; int status = 0; while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); if (!monitor) break; status = hci_remove_adv_monitor(hdev, monitor); if (status) return status; idr_next_id++; } return status; } /* This function requires the caller holds hdev->lock */ bool hci_is_adv_monitoring(struct hci_dev *hdev) { return !idr_is_empty(&hdev->adv_monitors_idr); } int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) { if (msft_monitor_supported(hdev)) return HCI_ADV_MONITOR_EXT_MSFT; return HCI_ADV_MONITOR_EXT_NONE; } struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_flags *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; list_for_each_entry_safe(b, n, bdaddr_list, list) { list_del(&b->list); kfree(b); } } int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, 
bdaddr); entry->bdaddr_type = type; if (peer_irk) memcpy(entry->peer_irk, peer_irk, 16); if (local_irk) memcpy(entry->local_irk, local_irk, 16); list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags) { struct bdaddr_list_with_flags *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; entry->flags = flags; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_flags *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; list_for_each_entry(params, &hdev->le_conn_params, list) { if (bacmp(&params->addr, addr) == 0 && params->addr_type == addr_type) { return params; } } return NULL; } /* This function requires the caller holds hdev->lock or rcu_read_lock */ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *param; rcu_read_lock(); list_for_each_entry_rcu(param, list, action) { if (bacmp(&param->addr, addr) == 0 && param->addr_type == addr_type) { rcu_read_unlock(); return param; } } rcu_read_unlock(); return NULL; } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_del_init(struct hci_conn_params *param) { if (list_empty(&param->action)) return; list_del_rcu(&param->action); synchronize_rcu(); INIT_LIST_HEAD(&param->action); } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list) { list_add_rcu(&param->action, list); } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (params) return params; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { bt_dev_err(hdev, "out of memory"); return NULL; } bacpy(&params->addr, addr); params->addr_type = addr_type; list_add(&params->list, &hdev->le_conn_params); INIT_LIST_HEAD(&params->action); params->conn_min_interval = hdev->le_conn_min_interval; params->conn_max_interval = hdev->le_conn_max_interval; params->conn_latency = hdev->le_conn_latency; params->supervision_timeout = hdev->le_supv_timeout; 
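	/* New entries take the controller-wide LE defaults copied above and
	 * start with autoconnect disabled; callers update auto_connect
	 * afterwards when they actually want background or direct connections.
	 */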
params->auto_connect = HCI_AUTO_CONN_DISABLED; BT_DBG("addr %pMR (type %u)", addr, addr_type); return params; } void hci_conn_params_free(struct hci_conn_params *params) { hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); } list_del(&params->list); kfree(params); } /* This function requires the caller holds hdev->lock */ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (!params) return; hci_conn_params_free(params); hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } /* This function requires the caller holds hdev->lock */ void hci_conn_params_clear_disabled(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { if (params->auto_connect != HCI_AUTO_CONN_DISABLED) continue; /* If trying to establish one time connection to disabled * device, leave the params, but mark them as just once. */ if (params->explicit_connect) { params->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } hci_conn_params_free(params); } BT_DBG("All LE disabled connection parameters were removed"); } /* This function requires the caller holds hdev->lock */ static void hci_conn_params_clear_all(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) hci_conn_params_free(params); BT_DBG("All LE connection parameters were removed"); } /* Copy the Identity Address of the controller. * * If the controller has a public BD_ADDR, then by default use that one. * If this is a LE only controller without a public address, default to * the static random address. * * For debugging purposes it is possible to force controllers with a * public address to use the static random address instead. * * In case BR/EDR has been disabled on a dual-mode controller and * userspace has configured a static address, then that address * becomes the identity address instead of the public BR/EDR address. */ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *bdaddr_type) { if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { bacpy(bdaddr, &hdev->static_addr); *bdaddr_type = ADDR_LE_DEV_RANDOM; } else { bacpy(bdaddr, &hdev->bdaddr); *bdaddr_type = ADDR_LE_DEV_PUBLIC; } } static void hci_clear_wake_reason(struct hci_dev *hdev) { hci_dev_lock(hdev); hdev->wake_reason = 0; bacpy(&hdev->wake_addr, BDADDR_ANY); hdev->wake_addr_type = 0; hci_dev_unlock(hdev); } static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct hci_dev *hdev = container_of(nb, struct hci_dev, suspend_notifier); int ret = 0; /* Userspace has full control of this device. Do nothing. */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return NOTIFY_DONE; /* To avoid a potential race with hci_unregister_dev. 
*/ hci_dev_hold(hdev); if (action == PM_SUSPEND_PREPARE) ret = hci_suspend_dev(hdev); else if (action == PM_POST_SUSPEND) ret = hci_resume_dev(hdev); if (ret) bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); hci_dev_put(hdev); return NOTIFY_DONE; } /* Alloc HCI device */ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) { struct hci_dev *hdev; unsigned int alloc_size; alloc_size = sizeof(*hdev); if (sizeof_priv) { /* Fixme: May need ALIGN-ment? */ alloc_size += sizeof_priv; } hdev = kzalloc(alloc_size, GFP_KERNEL); if (!hdev) return NULL; hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); hdev->num_iac = 0x01; /* One IAC support is mandatory */ hdev->io_capability = 0x03; /* No Input No Output */ hdev->manufacturer = 0xffff; /* Default to internal use */ hdev->inq_tx_power = HCI_TX_POWER_INVALID; hdev->adv_tx_power = HCI_TX_POWER_INVALID; hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; hdev->adv_instance_timeout = 0; hdev->advmon_allowlist_duration = 300; hdev->advmon_no_filter_duration = 500; hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; hdev->le_adv_channel_map = 0x07; hdev->le_adv_min_interval = 0x0800; hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; hdev->le_scan_int_suspend = 0x0400; hdev->le_scan_window_suspend = 0x0012; hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; hdev->le_scan_int_adv_monitor = 0x0060; hdev->le_scan_window_adv_monitor = 0x0030; hdev->le_scan_int_connect = 0x0060; hdev->le_scan_window_connect = 0x0060; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; hdev->le_supv_timeout = 0x002a; hdev->le_def_tx_len = 0x001b; hdev->le_def_tx_time = 0x0148; hdev->le_max_tx_len = 0x001b; hdev->le_max_tx_time = 0x0148; hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_time = 0x0148; hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; hdev->min_le_tx_power = HCI_TX_POWER_INVALID; hdev->max_le_tx_power = HCI_TX_POWER_INVALID; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; /* default 1.28 sec page scan */ hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; hdev->def_page_scan_int = 0x0800; hdev->def_page_scan_window = 0x0012; mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->reject_list); INIT_LIST_HEAD(&hdev->accept_list); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->le_accept_list); INIT_LIST_HEAD(&hdev->le_resolv_list); INIT_LIST_HEAD(&hdev->le_conn_params); INIT_LIST_HEAD(&hdev->pend_le_conns); 
INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_LIST_HEAD(&hdev->adv_instances); INIT_LIST_HEAD(&hdev->blocked_keys); INIT_LIST_HEAD(&hdev->monitored_devices); INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); hci_devcd_setup(hdev); hci_request_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); return hdev; } EXPORT_SYMBOL(hci_alloc_dev_priv); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) { /* will free via device release */ put_device(&hdev->dev); } EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { int id, error; if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; /* Do not allow HCI_AMP devices to register at index 0, * so the index can be used as the AMP controller ID. */ switch (hdev->dev_type) { case HCI_PRIMARY: id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL); break; case HCI_AMP: id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL); break; default: return -EINVAL; } if (id < 0) return id; snprintf(hdev->name, sizeof(hdev->name), "hci%d", id); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->workqueue) { error = -ENOMEM; goto err; } hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->req_workqueue) { destroy_workqueue(hdev->workqueue); error = -ENOMEM; goto err; } if (!IS_ERR_OR_NULL(bt_debugfs)) hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); dev_set_name(&hdev->dev, "%s", hdev->name); error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; hci_leds_init(hdev); hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); if (hdev->rfkill) { if (rfkill_register(hdev->rfkill) < 0) { rfkill_destroy(hdev->rfkill); hdev->rfkill = NULL; } } if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) hci_dev_set_flag(hdev, HCI_RFKILLED); hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_AUTO_OFF); if (hdev->dev_type == HCI_PRIMARY) { /* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init. */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); } write_lock(&hci_dev_list_lock); list_add(&hdev->list, &hci_dev_list); write_unlock(&hci_dev_list_lock); /* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation. */ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* Mark Remote Wakeup connection flag as supported if driver has wakeup * callback. 
*/ if (hdev->wakeup) hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); error = hci_register_suspend_notifier(hdev); if (error) BT_WARN("register suspend notifier failed error:%d\n", error); queue_work(hdev->req_workqueue, &hdev->power_on); idr_init(&hdev->adv_monitors_idr); msft_register(hdev); return id; err_wqueue: debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: ida_simple_remove(&hci_index_ida, hdev->id); return error; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); cancel_work_sync(&hdev->power_on); hci_cmd_sync_clear(hdev); hci_unregister_suspend_notifier(hdev); msft_unregister(hdev); hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); } /* mgmt_index_removed should take care of emptying the * pending list */ BUG_ON(!list_empty(&hdev->mgmt_pending)); hci_sock_dev_event(hdev, HCI_DEV_UNREG); if (hdev->rfkill) { rfkill_unregister(hdev->rfkill); rfkill_destroy(hdev->rfkill); } device_del(&hdev->dev); /* Actual cleanup is deferred until hci_release_dev(). */ hci_dev_put(hdev); } EXPORT_SYMBOL(hci_unregister_dev); /* Release HCI device */ void hci_release_dev(struct hci_dev *hdev) { debugfs_remove_recursive(hdev->debugfs); kfree_const(hdev->hw_info); kfree_const(hdev->fw_info); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->reject_list); hci_bdaddr_list_clear(&hdev->accept_list); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_accept_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); hci_blocked_keys_clear(hdev); hci_dev_unlock(hdev); ida_simple_remove(&hci_index_ida, hdev->id); kfree_skb(hdev->sent_cmd); kfree_skb(hdev->recv_event); kfree(hdev); } EXPORT_SYMBOL(hci_release_dev); int hci_register_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (!hdev->suspend_notifier.notifier_call && !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { hdev->suspend_notifier.notifier_call = hci_suspend_notifier; ret = register_pm_notifier(&hdev->suspend_notifier); } return ret; } int hci_unregister_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (hdev->suspend_notifier.notifier_call) { ret = unregister_pm_notifier(&hdev->suspend_notifier); if (!ret) hdev->suspend_notifier.notifier_call = NULL; } return ret; } /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Suspend should only act on when powered. 
*/ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to suspend */ if (mgmt_powering_down(hdev)) return 0; /* Cancel potentially blocking sync operation before suspend */ __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); hci_req_sync_lock(hdev); ret = hci_suspend_sync(hdev); hci_req_sync_unlock(hdev); hci_clear_wake_reason(hdev); mgmt_suspending(hdev, hdev->suspend_state); hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); return ret; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Resume should only act on when powered. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to resume */ if (mgmt_powering_down(hdev)) return 0; hci_req_sync_lock(hdev); ret = hci_resume_sync(hdev); hci_req_sync_unlock(hdev); mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, hdev->wake_addr_type); hci_sock_dev_event(hdev, HCI_DEV_RESUME); return ret; } EXPORT_SYMBOL(hci_resume_dev); /* Reset HCI device */ int hci_reset_dev(struct hci_dev *hdev) { static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; struct sk_buff *skb; skb = bt_skb_alloc(3, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, hw_err, 3); bt_dev_err(hdev, "Injecting HCI hardware error event"); /* Send Hardware Error to upper stack */ return hci_recv_frame(hdev, skb); } EXPORT_SYMBOL(hci_reset_dev); /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: break; case HCI_ACLDATA_PKT: /* Detect if ISO packet has been sent as ACL */ if (hci_conn_num(hdev, ISO_LINK)) { __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); __u8 type; type = hci_conn_lookup_type(hdev, hci_handle(handle)); if (type == ISO_LINK) hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; } break; case HCI_SCODATA_PKT: break; case HCI_ISODATA_PKT: break; default: kfree_skb(skb); return -EINVAL; } /* Incoming skb */ bt_cb(skb)->incoming = 1; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_frame); /* Receive diagnostic message from HCI drivers */ int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) { /* Mark as diagnostic packet */ hci_skb_pkt_type(skb) = HCI_DIAG_PKT; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_diag); void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); kfree_const(hdev->hw_info); hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_hw_info); void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
{ va_list vargs; va_start(vargs, fmt); kfree_const(hdev->fw_info); hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_fw_info); /* ---- Interface to upper protocols ---- */ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); mutex_lock(&hci_cb_list_lock); list_add_tail(&cb->list, &hci_cb_list); mutex_unlock(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_register_cb); int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); mutex_lock(&hci_cb_list_lock); list_del(&cb->list); mutex_unlock(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_unregister_cb); static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { int err; BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); /* Time stamp */ __net_timestamp(skb); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* Get rid of skb owner, prior to sending to the driver. */ skb_orphan(skb); if (!test_bit(HCI_RUNNING, &hdev->flags)) { kfree_skb(skb); return -EINVAL; } err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); kfree_skb(skb); return err; } return 0; } /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param) { struct sk_buff *skb; BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; } /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) { struct sk_buff *skb; if (hci_opcode_ogf(opcode) != 0x3f) { /* A controller receiving a command shall respond with either * a Command Status Event or a Command Complete Event. * Therefore, all standard HCI commands must be sent via the * standard API, using hci_send_cmd or hci_cmd_sync helpers. * Some vendors do not comply with this rule for vendor-specific * commands and do not return any event. We want to support * unresponded commands for such cases only. 
*/ bt_dev_err(hdev, "unresponded command not supported"); return -EINVAL; } skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); return -ENOMEM; } hci_send_frame(hdev, skb); return 0; } EXPORT_SYMBOL(__hci_cmd_send); /* Get data from the previously sent command */ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { struct hci_command_hdr *hdr; if (!hdev->sent_cmd) return NULL; hdr = (void *) hdev->sent_cmd->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } /* Get data from last received event */ void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) { struct hci_event_hdr *hdr; int offset; if (!hdev->recv_event) return NULL; hdr = (void *)hdev->recv_event->data; offset = sizeof(*hdr); if (hdr->evt != event) { /* In case of LE metaevent check the subevent match */ if (hdr->evt == HCI_EV_LE_META) { struct hci_ev_le_meta *ev; ev = (void *)hdev->recv_event->data + offset; offset += sizeof(*ev); if (ev->subevent == event) goto found; } return NULL; } found: bt_dev_dbg(hdev, "event 0x%2.2x", event); return hdev->recv_event->data + offset; } /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { struct hci_acl_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ACL_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_acl_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, struct sk_buff *skb, __u16 flags) { struct hci_conn *conn = chan->conn; struct hci_dev *hdev = conn->hdev; struct sk_buff *list; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; switch (hdev->dev_type) { case HCI_PRIMARY: hci_add_acl_hdr(skb, conn->handle, flags); break; case HCI_AMP: hci_add_acl_hdr(skb, chan->handle, flags); break; default: bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); return; } list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically. We need to use spin_lock_bh * here because of 6LoWPAN links, as there this function is * called from softirq and using normal spin lock could cause * deadlocks. 
*/ spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); flags &= ~ACL_START; flags |= ACL_CONT; do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); spin_unlock_bh(&queue->lock); } } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = chan->conn->hdev; BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); hci_queue_acl(chan, &chan->data_q, skb, flags); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send SCO data */ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct hci_sco_hdr hdr; BT_DBG("%s len %d", hdev->name, skb->len); hdr.handle = cpu_to_le16(conn->handle); hdr.dlen = skb->len; skb_push(skb, HCI_SCO_HDR_SIZE); skb_reset_transport_header(skb); memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send ISO data */ static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) { struct hci_iso_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ISO_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_iso_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; __u16 flags; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; list = skb_shinfo(skb)->frag_list; flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; __skb_queue_tail(queue, skb); do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); } } void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; BT_DBG("%s len %d", hdev->name, skb->len); hci_queue_iso(conn, &conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) { struct hci_dev *hdev; int cnt, q; if (!conn) { *quote = 0; return; } hdev = conn->hdev; switch (conn->type) { case ACL_LINK: cnt = hdev->acl_cnt; break; case AMP_LINK: cnt = hdev->block_cnt; break; case SCO_LINK: case ESCO_LINK: cnt = hdev->sco_cnt; break; case LE_LINK: cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; case ISO_LINK: cnt = hdev->iso_mtu ? hdev->iso_cnt : hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; default: cnt = 0; bt_dev_err(hdev, "unknown link type %d", conn->type); } q = cnt / num; *quote = q ? 
q : 1; } static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; unsigned int num = 0, min = ~0; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != type || skb_queue_empty(&c->data_q)) continue; if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; num++; if (c->sent < min) { min = c->sent; conn = c; } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; } static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; bt_dev_err(hdev, "link tx timeout"); rcu_read_lock(); /* Kill stalled connections */ list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); } } rcu_read_unlock(); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; int conn_num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *tmp; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; conn_num++; list_for_each_entry_rcu(tmp, &conn->chan_list, list) { struct sk_buff *skb; if (skb_queue_empty(&tmp->data_q)) continue; skb = skb_peek(&tmp->data_q); if (skb->priority < cur_prio) continue; if (skb->priority > cur_prio) { num = 0; min = ~0; cur_prio = skb->priority; } num++; if (conn->sent < min) { min = conn->sent; chan = tmp; } } if (hci_conn_num(hdev, type) == conn_num) break; } rcu_read_unlock(); if (!chan) return NULL; hci_quote_sent(chan->conn, num, quote); BT_DBG("chan %p quote %d", chan, *quote); return chan; } static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn; int num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *chan; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; num++; list_for_each_entry_rcu(chan, &conn->chan_list, list) { struct sk_buff *skb; if (chan->sent) { chan->sent = 0; continue; } if (skb_queue_empty(&chan->data_q)) continue; skb = skb_peek(&chan->data_q); if (skb->priority >= HCI_PRIO_MAX - 1) continue; skb->priority = HCI_PRIO_MAX - 1; BT_DBG("chan %p skb %p promoted to %d", chan, skb, skb->priority); } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); } static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) { /* Calculate count of blocks used by this packet */ return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { unsigned long last_tx; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return; switch (type) { case LE_LINK: last_tx = hdev->le_last_tx; break; default: last_tx = hdev->acl_last_tx; break; } /* tx timeout must be longer than maximum link supervision timeout * (40.9 seconds) */ if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) 
hci_link_tx_to(hdev, type); } /* Schedule SCO */ static void hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, SCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ESCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_acl_pkt(struct hci_dev *hdev) { unsigned int cnt = hdev->acl_cnt; struct hci_chan *chan; struct sk_buff *skb; int quote; __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); hci_send_frame(hdev, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (cnt != hdev->acl_cnt) hci_prio_recalculate(hdev, ACL_LINK); } static void hci_sched_acl_blk(struct hci_dev *hdev) { unsigned int cnt = hdev->block_cnt; struct hci_chan *chan; struct sk_buff *skb; int quote; u8 type; BT_DBG("%s", hdev->name); if (hdev->dev_type == HCI_AMP) type = AMP_LINK; else type = ACL_LINK; __check_timeout(hdev, cnt, type); while (hdev->block_cnt > 0 && (chan = hci_chan_sent(hdev, type, &quote))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote > 0 && (skb = skb_peek(&chan->data_q))) { int blocks; BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); blocks = __get_blocks(hdev, skb); if (blocks > hdev->block_cnt) return; hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); hci_send_frame(hdev, skb); hdev->acl_last_tx = jiffies; hdev->block_cnt -= blocks; quote -= blocks; chan->sent += blocks; chan->conn->sent += blocks; } } if (cnt != hdev->block_cnt) hci_prio_recalculate(hdev, type); } static void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); /* No ACL link over BR/EDR controller */ if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) return; /* No AMP link over AMP controller */ if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) return; switch (hdev->flow_ctl_mode) { case HCI_FLOW_CTL_MODE_PACKET_BASED: hci_sched_acl_pkt(hdev); break; case HCI_FLOW_CTL_MODE_BLOCK_BASED: hci_sched_acl_blk(hdev); break; } } static void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; int quote, cnt, tmp; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, LE_LINK)) return; cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; __check_timeout(hdev, cnt, LE_LINK); tmp = cnt; while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_send_frame(hdev, skb); hdev->le_last_tx = jiffies; cnt--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (hdev->le_pkts) hdev->le_cnt = cnt; else hdev->acl_cnt = cnt; if (cnt != tmp) hci_prio_recalculate(hdev, LE_LINK); } /* Schedule CIS */ static void hci_sched_iso(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote, *cnt; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ISO_LINK)) return; cnt = hdev->iso_pkts ? &hdev->iso_cnt : hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt; while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; (*cnt)--; } } } static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ hci_sched_sco(hdev); hci_sched_esco(hdev); hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); } /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(hdev, skb); } /* ----- HCI RX task (incoming data processing) ----- */ /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle, flags; skb_pull(skb, HCI_ACL_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, handle, flags); hdev->stat.acl_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); /* Send to upper protocol */ l2cap_recv_acldata(conn, skb, flags); return; } else { bt_dev_err(hdev, "ACL packet for unknown connection handle %d", handle); } kfree_skb(skb); } /* SCO data packet */ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle, flags; skb_pull(skb, HCI_SCO_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, handle, flags); hdev->stat.sco_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { /* Send to upper protocol */ hci_skb_pkt_status(skb) = flags & 0x03; sco_recv_scodata(conn, skb); return; } else { bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", handle); } kfree_skb(skb); } static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_iso_hdr *hdr; struct hci_conn *conn; 
__u16 handle, flags; hdr = skb_pull_data(skb, sizeof(*hdr)); if (!hdr) { bt_dev_err(hdev, "ISO packet too small"); goto drop; } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, handle, flags); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (!conn) { bt_dev_err(hdev, "ISO packet for unknown connection handle %d", handle); goto drop; } /* Send to upper protocol */ iso_recv(conn, skb, flags); return; drop: kfree_skb(skb); } static bool hci_req_is_complete(struct hci_dev *hdev) { struct sk_buff *skb; skb = skb_peek(&hdev->cmd_q); if (!skb) return true; return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); } static void hci_resend_last(struct hci_dev *hdev) { struct hci_command_hdr *sent; struct sk_buff *skb; u16 opcode; if (!hdev->sent_cmd) return; sent = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(sent->opcode); if (opcode == HCI_OP_RESET) return; skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); if (!skb) return; skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct sk_buff *skb; unsigned long flags; BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); /* If the completed command doesn't match the last one that was * sent we need to do special handling of it. */ if (!hci_sent_cmd_data(hdev, opcode)) { /* Some CSR based controllers generate a spontaneous * reset complete event during init and any pending * command will never be completed. In such a case we * need to resend whatever was the last sent * command. */ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) hci_resend_last(hdev); return; } /* If we reach this point this event matches the last command sent */ hci_dev_clear_flag(hdev, HCI_CMD_PENDING); /* If the command succeeded and there's still more commands in * this request the request is not yet complete. */ if (!status && !hci_req_is_complete(hdev)) return; /* If this was the last command in a request the complete * callback would be found in hdev->sent_cmd instead of the * command queue (hdev->cmd_q). */ if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; return; } if (bt_cb(hdev->sent_cmd)->hci.req_complete) { *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; return; } /* Remove all pending commands belonging to this request */ spin_lock_irqsave(&hdev->cmd_q.lock, flags); while ((skb = __skb_dequeue(&hdev->cmd_q))) { if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { __skb_queue_head(&hdev->cmd_q, skb); break; } if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } static void hci_rx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); struct sk_buff *skb; BT_DBG("%s", hdev->name); /* The kcov_remote functions used for collecting packet parsing * coverage information from this background thread and associate * the coverage with the syscall's thread which originally injected * the packet. This helps fuzzing the kernel. 
*/ for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* If the device has been opened in HCI_USER_CHANNEL, * the userspace has exclusive access to device. * When device is HCI_INIT, we still need to process * the data packets to the driver in order * to complete its setup(). */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !test_bit(HCI_INIT, &hdev->flags)) { kfree_skb(skb); continue; } if (test_bit(HCI_INIT, &hdev->flags)) { /* Don't process data packets in this states. */ switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: kfree_skb(skb); continue; } } /* Process frame */ switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: BT_DBG("%s Event packet", hdev->name); hci_event_packet(hdev, skb); break; case HCI_ACLDATA_PKT: BT_DBG("%s ACL data packet", hdev->name); hci_acldata_packet(hdev, skb); break; case HCI_SCODATA_PKT: BT_DBG("%s SCO data packet", hdev->name); hci_scodata_packet(hdev, skb); break; case HCI_ISODATA_PKT: BT_DBG("%s ISO data packet", hdev->name); hci_isodata_packet(hdev, skb); break; default: kfree_skb(skb); break; } } } static void hci_cmd_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); struct sk_buff *skb; BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); /* Send queued commands */ if (atomic_read(&hdev->cmd_cnt)) { skb = skb_dequeue(&hdev->cmd_q); if (!skb) return; kfree_skb(hdev->sent_cmd); hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); if (hdev->sent_cmd) { int res; if (hci_req_status_pend(hdev)) hci_dev_set_flag(hdev, HCI_CMD_PENDING); atomic_dec(&hdev->cmd_cnt); res = hci_send_frame(hdev, skb); if (res < 0) __hci_cmd_sync_cancel(hdev, -res); rcu_read_lock(); if (test_bit(HCI_RESET, &hdev->flags) || hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) cancel_delayed_work(&hdev->cmd_timer); else queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, HCI_CMD_TIMEOUT); rcu_read_unlock(); } else { skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } } }
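/* Illustrative sketch (not part of hci_core.c): how a minimal driver would
 * typically use the registration and RX entry points defined above -
 * allocate the device with hci_alloc_dev(), fill in the mandatory
 * open/close/send callbacks checked by hci_register_dev(), register it, and
 * feed incoming packets back through hci_recv_frame().  The identifiers
 * foo_open, foo_close, foo_send, foo_probe and foo_deliver_event are
 * hypothetical placeholders, not existing kernel symbols.
 */

static int foo_open(struct hci_dev *hdev)
{
	return 0;	/* bring the transport up */
}

static int foo_close(struct hci_dev *hdev)
{
	return 0;	/* shut the transport down */
}

static int foo_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver hands the skb to its transport here; on success the
	 * driver owns the skb and must free it once it has been sent.
	 */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *foo_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return NULL;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = foo_open;
	hdev->close = foo_close;
	hdev->send  = foo_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}

/* Delivering a received HCI event to the core: tag the packet type and let
 * hci_recv_frame() queue it on hdev->rx_q for hci_rx_work() above.
 */
static int foo_deliver_event(struct hci_dev *hdev, const void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, data, len);

	return hci_recv_frame(hdev, skb);
}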
linux-master
net/bluetooth/hci_core.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2023 Google Corporation */ #include <linux/devcoredump.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> enum hci_devcoredump_pkt_type { HCI_DEVCOREDUMP_PKT_INIT, HCI_DEVCOREDUMP_PKT_SKB, HCI_DEVCOREDUMP_PKT_PATTERN, HCI_DEVCOREDUMP_PKT_COMPLETE, HCI_DEVCOREDUMP_PKT_ABORT, }; struct hci_devcoredump_skb_cb { u16 pkt_type; }; struct hci_devcoredump_skb_pattern { u8 pattern; u32 len; } __packed; #define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb)) #define DBG_UNEXPECTED_STATE() \ bt_dev_dbg(hdev, \ "Unexpected packet (%d) for state (%d). ", \ hci_dmp_cb(skb)->pkt_type, hdev->dump.state) #define MAX_DEVCOREDUMP_HDR_SIZE 512 /* bytes */ static int hci_devcd_update_hdr_state(char *buf, size_t size, int state) { int len = 0; if (!buf) return 0; len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state); return len + 1; /* scnprintf adds \0 at the end upon state rewrite */ } /* Call with hci_dev_lock only. */ static int hci_devcd_update_state(struct hci_dev *hdev, int state) { bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.", hdev->dump.state, state); hdev->dump.state = state; return hci_devcd_update_hdr_state(hdev->dump.head, hdev->dump.alloc_size, state); } static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb) { char dump_start[] = "--- Start dump ---\n"; char hdr[80]; int hdr_len; hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr), HCI_DEVCOREDUMP_IDLE); skb_put_data(skb, hdr, hdr_len); if (hdev->dump.dmp_hdr) hdev->dump.dmp_hdr(hdev, skb); skb_put_data(skb, dump_start, strlen(dump_start)); return skb->len; } /* Do not call with hci_dev_lock since this calls driver code. */ static void hci_devcd_notify(struct hci_dev *hdev, int state) { if (hdev->dump.notify_change) hdev->dump.notify_change(hdev, state); } /* Call with hci_dev_lock only. */ void hci_devcd_reset(struct hci_dev *hdev) { hdev->dump.head = NULL; hdev->dump.tail = NULL; hdev->dump.alloc_size = 0; hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); cancel_delayed_work(&hdev->dump.dump_timeout); skb_queue_purge(&hdev->dump.dump_q); } /* Call with hci_dev_lock only. */ static void hci_devcd_free(struct hci_dev *hdev) { vfree(hdev->dump.head); hci_devcd_reset(hdev); } /* Call with hci_dev_lock only. */ static int hci_devcd_alloc(struct hci_dev *hdev, u32 size) { hdev->dump.head = vmalloc(size); if (!hdev->dump.head) return -ENOMEM; hdev->dump.alloc_size = size; hdev->dump.tail = hdev->dump.head; hdev->dump.end = hdev->dump.head + size; hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); return 0; } /* Call with hci_dev_lock only. */ static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size) { if (hdev->dump.tail + size > hdev->dump.end) return false; memcpy(hdev->dump.tail, buf, size); hdev->dump.tail += size; return true; } /* Call with hci_dev_lock only. */ static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len) { if (hdev->dump.tail + len > hdev->dump.end) return false; memset(hdev->dump.tail, pattern, len); hdev->dump.tail += len; return true; } /* Call with hci_dev_lock only. 
*/ static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size) { struct sk_buff *skb; int dump_hdr_size; int err = 0; skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; dump_hdr_size = hci_devcd_mkheader(hdev, skb); if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) { err = -ENOMEM; goto hdr_free; } /* Insert the device header */ if (!hci_devcd_copy(hdev, skb->data, skb->len)) { bt_dev_err(hdev, "Failed to insert header"); hci_devcd_free(hdev); err = -ENOMEM; goto hdr_free; } hdr_free: kfree_skb(skb); return err; } static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb) { u32 dump_size; if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) { DBG_UNEXPECTED_STATE(); return; } if (skb->len != sizeof(dump_size)) { bt_dev_dbg(hdev, "Invalid dump init pkt"); return; } dump_size = get_unaligned_le32(skb_pull_data(skb, 4)); if (!dump_size) { bt_dev_err(hdev, "Zero size dump init pkt"); return; } if (hci_devcd_prepare(hdev, dump_size)) { bt_dev_err(hdev, "Failed to prepare for dump"); return; } hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE); queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout, hdev->dump.timeout); } static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb) { if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { DBG_UNEXPECTED_STATE(); return; } if (!hci_devcd_copy(hdev, skb->data, skb->len)) bt_dev_dbg(hdev, "Failed to insert skb"); } static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_devcoredump_skb_pattern *pattern; if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { DBG_UNEXPECTED_STATE(); return; } if (skb->len != sizeof(*pattern)) { bt_dev_dbg(hdev, "Invalid pattern skb"); return; } pattern = skb_pull_data(skb, sizeof(*pattern)); if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len)) bt_dev_dbg(hdev, "Failed to set pattern"); } static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev, struct sk_buff *skb) { u32 dump_size; if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { DBG_UNEXPECTED_STATE(); return; } hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE); dump_size = hdev->dump.tail - hdev->dump.head; bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); } static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev, struct sk_buff *skb) { u32 dump_size; if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { DBG_UNEXPECTED_STATE(); return; } hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT); dump_size = hdev->dump.tail - hdev->dump.head; bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); /* Emit a devcoredump with the available data */ dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); } /* Bluetooth devcoredump state machine. * * Devcoredump states: * * HCI_DEVCOREDUMP_IDLE: The default state. * * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has * been initialized using hci_devcd_init(). Once active, the driver * can append data using hci_devcd_append() or insert a pattern * using hci_devcd_append_pattern(). * * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the drive * can signal the completion using hci_devcd_complete(). A * devcoredump is generated indicating the completion event and * then the state machine is reset to the default state. 
* * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in * case of any error using hci_devcd_abort(). A devcoredump is * still generated with the available data indicating the abort * event and then the state machine is reset to the default state. * * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec * is started during devcoredump initialization. Once the timeout * occurs, the driver is notified, a devcoredump is generated with * the available data indicating the timeout event and then the * state machine is reset to the default state. * * The driver must register using hci_devcd_register() before using the hci * devcoredump APIs. */ void hci_devcd_rx(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx); struct sk_buff *skb; int start_state; while ((skb = skb_dequeue(&hdev->dump.dump_q))) { /* Return if timeout occurs. The timeout handler function * hci_devcd_timeout() will report the available dump data. */ if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) { kfree_skb(skb); return; } hci_dev_lock(hdev); start_state = hdev->dump.state; switch (hci_dmp_cb(skb)->pkt_type) { case HCI_DEVCOREDUMP_PKT_INIT: hci_devcd_handle_pkt_init(hdev, skb); break; case HCI_DEVCOREDUMP_PKT_SKB: hci_devcd_handle_pkt_skb(hdev, skb); break; case HCI_DEVCOREDUMP_PKT_PATTERN: hci_devcd_handle_pkt_pattern(hdev, skb); break; case HCI_DEVCOREDUMP_PKT_COMPLETE: hci_devcd_handle_pkt_complete(hdev, skb); break; case HCI_DEVCOREDUMP_PKT_ABORT: hci_devcd_handle_pkt_abort(hdev, skb); break; default: bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ", hci_dmp_cb(skb)->pkt_type, hdev->dump.state); break; } hci_dev_unlock(hdev); kfree_skb(skb); /* Notify the driver about any state changes before resetting * the state machine */ if (start_state != hdev->dump.state) hci_devcd_notify(hdev, hdev->dump.state); /* Reset the state machine if the devcoredump is complete */ hci_dev_lock(hdev); if (hdev->dump.state == HCI_DEVCOREDUMP_DONE || hdev->dump.state == HCI_DEVCOREDUMP_ABORT) hci_devcd_reset(hdev); hci_dev_unlock(hdev); } } EXPORT_SYMBOL(hci_devcd_rx); void hci_devcd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_timeout.work); u32 dump_size; hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT); hci_dev_lock(hdev); cancel_work(&hdev->dump.dump_rx); hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT); dump_size = hdev->dump.tail - hdev->dump.head; bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); /* Emit a devcoredump with the available data */ dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); hci_devcd_reset(hdev); hci_dev_unlock(hdev); } EXPORT_SYMBOL(hci_devcd_timeout); int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump, dmp_hdr_t dmp_hdr, notify_change_t notify_change) { /* Driver must implement coredump() and dmp_hdr() functions for * bluetooth devcoredump. The coredump() should trigger a coredump * event on the controller when the device's coredump sysfs entry is * written to. The dmp_hdr() should create a dump header to identify * the controller/fw/driver info. 
*/ if (!coredump || !dmp_hdr) return -EINVAL; hci_dev_lock(hdev); hdev->dump.coredump = coredump; hdev->dump.dmp_hdr = dmp_hdr; hdev->dump.notify_change = notify_change; hdev->dump.supported = true; hdev->dump.timeout = DEVCOREDUMP_TIMEOUT; hci_dev_unlock(hdev); return 0; } EXPORT_SYMBOL(hci_devcd_register); static inline bool hci_devcd_enabled(struct hci_dev *hdev) { return hdev->dump.supported; } int hci_devcd_init(struct hci_dev *hdev, u32 dump_size) { struct sk_buff *skb; if (!hci_devcd_enabled(hdev)) return -EOPNOTSUPP; skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC); if (!skb) return -ENOMEM; hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT; put_unaligned_le32(dump_size, skb_put(skb, 4)); skb_queue_tail(&hdev->dump.dump_q, skb); queue_work(hdev->workqueue, &hdev->dump.dump_rx); return 0; } EXPORT_SYMBOL(hci_devcd_init); int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb) { if (!skb) return -ENOMEM; if (!hci_devcd_enabled(hdev)) { kfree_skb(skb); return -EOPNOTSUPP; } hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB; skb_queue_tail(&hdev->dump.dump_q, skb); queue_work(hdev->workqueue, &hdev->dump.dump_rx); return 0; } EXPORT_SYMBOL(hci_devcd_append); int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len) { struct hci_devcoredump_skb_pattern p; struct sk_buff *skb; if (!hci_devcd_enabled(hdev)) return -EOPNOTSUPP; skb = alloc_skb(sizeof(p), GFP_ATOMIC); if (!skb) return -ENOMEM; p.pattern = pattern; p.len = len; hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN; skb_put_data(skb, &p, sizeof(p)); skb_queue_tail(&hdev->dump.dump_q, skb); queue_work(hdev->workqueue, &hdev->dump.dump_rx); return 0; } EXPORT_SYMBOL(hci_devcd_append_pattern); int hci_devcd_complete(struct hci_dev *hdev) { struct sk_buff *skb; if (!hci_devcd_enabled(hdev)) return -EOPNOTSUPP; skb = alloc_skb(0, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE; skb_queue_tail(&hdev->dump.dump_q, skb); queue_work(hdev->workqueue, &hdev->dump.dump_rx); return 0; } EXPORT_SYMBOL(hci_devcd_complete); int hci_devcd_abort(struct hci_dev *hdev) { struct sk_buff *skb; if (!hci_devcd_enabled(hdev)) return -EOPNOTSUPP; skb = alloc_skb(0, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT; skb_queue_tail(&hdev->dump.dump_q, skb); queue_work(hdev->workqueue, &hdev->dump.dump_rx); return 0; } EXPORT_SYMBOL(hci_devcd_abort);
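/* Illustrative sketch (not part of coredump.c): the driver-side sequence for
 * the devcoredump API exported above.  The names bar_coredump, bar_dmp_hdr,
 * bar_notify, bar_setup_coredump and bar_dump_fw_region are hypothetical
 * driver helpers.  The dmp_hdr and notify_change prototypes follow how
 * hdev->dump.dmp_hdr() and hdev->dump.notify_change() are invoked in this
 * file; the coredump() prototype is an assumption.
 */

static void bar_coredump(struct hci_dev *hdev)
{
	/* Ask the controller to start streaming its dump data */
}

static void bar_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	static const char hdr[] = "Controller: example\n";

	skb_put_data(skb, hdr, strlen(hdr));
}

static void bar_notify(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "devcoredump state %d", state);
}

/* At probe time, opt in to devcoredump support */
static int bar_setup_coredump(struct hci_dev *hdev)
{
	return hci_devcd_register(hdev, bar_coredump, bar_dmp_hdr, bar_notify);
}

/* When the controller reports a crash: declare the total size first, append
 * one or more skbs with dump data, then complete (or abort on error).
 */
static int bar_dump_fw_region(struct hci_dev *hdev, struct sk_buff *skb,
			      u32 total_size)
{
	int err;

	err = hci_devcd_init(hdev, total_size);
	if (err)
		return err;

	err = hci_devcd_append(hdev, skb);
	if (err)
		return err;

	return hci_devcd_complete(hdev);
}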
linux-master
net/bluetooth/coredump.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (C) 2021 Intel Corporation */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_codec.h"

static int hci_codec_list_add(struct list_head *list,
			      struct hci_op_read_local_codec_caps *sent,
			      struct hci_rp_read_local_codec_caps *rp,
			      void *caps,
			      __u32 len)
{
	struct codec_list *entry;

	entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id = sent->id;
	if (sent->id == 0xFF) {
		entry->cid = __le16_to_cpu(sent->cid);
		entry->vid = __le16_to_cpu(sent->vid);
	}
	entry->transport = sent->transport;
	entry->len = len;
	entry->num_caps = 0;
	if (rp) {
		entry->num_caps = rp->num_caps;
		memcpy(entry->caps, caps, len);
	}
	list_add(&entry->list, list);

	return 0;
}

void hci_codec_list_clear(struct list_head *codec_list)
{
	struct codec_list *c, *n;

	list_for_each_entry_safe(c, n, codec_list, list) {
		list_del(&c->list);
		kfree(c);
	}
}

static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
					struct hci_op_read_local_codec_caps *cmd)
{
	__u8 i;

	for (i = 0; i < TRANSPORT_TYPE_MAX; i++) {
		if (transport & BIT(i)) {
			struct hci_rp_read_local_codec_caps *rp;
			struct hci_codec_caps *caps;
			struct sk_buff *skb;
			__u8 j;
			__u32 len;

			cmd->transport = i;

			/* If Read_Codec_Capabilities command is not supported
			 * then just add codec to the list without caps
			 */
			if (!(hdev->commands[45] & 0x08)) {
				hci_dev_lock(hdev);
				hci_codec_list_add(&hdev->local_codecs, cmd,
						   NULL, NULL, 0);
				hci_dev_unlock(hdev);
				continue;
			}

			skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
						sizeof(*cmd), cmd, 0,
						HCI_CMD_TIMEOUT, NULL);
			if (IS_ERR(skb)) {
				bt_dev_err(hdev, "Failed to read codec capabilities (%ld)",
					   PTR_ERR(skb));
				continue;
			}

			if (skb->len < sizeof(*rp))
				goto error;

			rp = (void *)skb->data;

			if (rp->status)
				goto error;

			if (!rp->num_caps) {
				len = 0;
				/* this codec doesn't have capabilities */
				goto skip_caps_parse;
			}

			skb_pull(skb, sizeof(*rp));

			for (j = 0, len = 0; j < rp->num_caps; j++) {
				caps = (void *)skb->data;

				if (skb->len < sizeof(*caps))
					goto error;

				if (skb->len < caps->len)
					goto error;

				len += sizeof(caps->len) + caps->len;
				skb_pull(skb, sizeof(caps->len) + caps->len);
			}

skip_caps_parse:
			hci_dev_lock(hdev);
			hci_codec_list_add(&hdev->local_codecs, cmd, rp,
					   (__u8 *)rp + sizeof(*rp), len);
			hci_dev_unlock(hdev);
error:
			kfree_skb(skb);
		}
	}
}

void hci_read_supported_codecs(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct hci_rp_read_local_supported_codecs *rp;
	struct hci_std_codecs *std_codecs;
	struct hci_vnd_codecs *vnd_codecs;
	struct hci_op_read_local_codec_caps caps;
	__u8 i;

	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, 0,
				HCI_CMD_TIMEOUT, NULL);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len < sizeof(*rp))
		goto error;

	rp = (void *)skb->data;

	if (rp->status)
		goto error;

	skb_pull(skb, sizeof(rp->status));

	std_codecs = (void *)skb->data;

	/* validate codecs length before accessing */
	if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
	    + sizeof(std_codecs->num))
		goto error;

	/* enumerate codec capabilities of standard codecs */
	memset(&caps, 0, sizeof(caps));
	for (i = 0; i < std_codecs->num; i++) {
		caps.id = std_codecs->codec[i];
		caps.direction = 0x00;
		hci_read_codec_capabilities(hdev,
					    LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK,
					    &caps);
	}

	skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
		 + sizeof(std_codecs->num));

	vnd_codecs = (void *)skb->data;

	/* validate vendor codecs length before accessing */
	if (skb->len < flex_array_size(vnd_codecs, codec, vnd_codecs->num)
	    + sizeof(vnd_codecs->num))
		goto error;

	/* enumerate vendor codec capabilities */
	for (i = 0; i < vnd_codecs->num; i++) {
		caps.id = 0xFF;
		caps.cid = vnd_codecs->codec[i].cid;
		caps.vid = vnd_codecs->codec[i].vid;
		caps.direction = 0x00;
		hci_read_codec_capabilities(hdev,
					    LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK,
					    &caps);
	}

error:
	kfree_skb(skb);
}

void hci_read_supported_codecs_v2(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct hci_rp_read_local_supported_codecs_v2 *rp;
	struct hci_std_codecs_v2 *std_codecs;
	struct hci_vnd_codecs_v2 *vnd_codecs;
	struct hci_op_read_local_codec_caps caps;
	__u8 i;

	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, 0,
				HCI_CMD_TIMEOUT, NULL);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Failed to read local supported codecs (%ld)",
			   PTR_ERR(skb));
		return;
	}

	if (skb->len < sizeof(*rp))
		goto error;

	rp = (void *)skb->data;

	if (rp->status)
		goto error;

	skb_pull(skb, sizeof(rp->status));

	std_codecs = (void *)skb->data;

	/* check for payload data length before accessing */
	if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num)
	    + sizeof(std_codecs->num))
		goto error;

	memset(&caps, 0, sizeof(caps));

	for (i = 0; i < std_codecs->num; i++) {
		caps.id = std_codecs->codec[i].id;
		hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport,
					    &caps);
	}

	skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num)
		 + sizeof(std_codecs->num));

	vnd_codecs = (void *)skb->data;

	/* check for payload data length before accessing */
	if (skb->len < flex_array_size(vnd_codecs, codec, vnd_codecs->num)
	    + sizeof(vnd_codecs->num))
		goto error;

	for (i = 0; i < vnd_codecs->num; i++) {
		caps.id = 0xFF;
		caps.cid = vnd_codecs->codec[i].cid;
		caps.vid = vnd_codecs->codec[i].vid;
		hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport,
					    &caps);
	}

error:
	kfree_skb(skb);
}
linux-master
net/bluetooth/hci_codec.c
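For illustration, a minimal sketch (not a function from this file) of how a consumer might walk the hdev->local_codecs list that hci_codec_list_add() populates above. It assumes only the codec_list fields visible in this file (id, cid, vid, transport, num_caps) and the hdev->lock protection used by the code above.

/* Illustrative only: dump the codec entries collected by
 * hci_read_supported_codecs()/hci_read_supported_codecs_v2().
 * The list is protected by hdev->lock, as in the writers above.
 */
static void dump_local_codecs(struct hci_dev *hdev)
{
	struct codec_list *c;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->local_codecs, list) {
		if (c->id == 0xFF)
			/* Vendor-specific codec: identified by company/vendor ID */
			bt_dev_dbg(hdev, "vendor codec cid 0x%4.4x vid 0x%4.4x transport %u num_caps %u",
				   c->cid, c->vid, c->transport, c->num_caps);
		else
			/* Standard codec: identified by its assigned codec ID */
			bt_dev_dbg(hdev, "codec 0x%2.2x transport %u num_caps %u",
				   c->id, c->transport, c->num_caps);
	}
	hci_dev_unlock(hdev);
}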
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI sockets. */ #include <linux/compat.h> #include <linux/export.h> #include <linux/utsname.h> #include <linux/sched.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_mon.h> #include <net/bluetooth/mgmt.h> #include "mgmt_util.h" static LIST_HEAD(mgmt_chan_list); static DEFINE_MUTEX(mgmt_chan_list_lock); static DEFINE_IDA(sock_cookie_ida); static atomic_t monitor_promisc = ATOMIC_INIT(0); /* ----- HCI socket interface ----- */ /* Socket info */ #define hci_pi(sk) ((struct hci_pinfo *) sk) struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; __u8 cmsg_mask; unsigned short channel; unsigned long flags; __u32 cookie; char comm[TASK_COMM_LEN]; __u16 mtu; }; static struct hci_dev *hci_hdev_from_sock(struct sock *sk) { struct hci_dev *hdev = hci_pi(sk)->hdev; if (!hdev) return ERR_PTR(-EBADFD); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) return ERR_PTR(-EPIPE); return hdev; } void hci_sock_set_flag(struct sock *sk, int nr) { set_bit(nr, &hci_pi(sk)->flags); } void hci_sock_clear_flag(struct sock *sk, int nr) { clear_bit(nr, &hci_pi(sk)->flags); } int hci_sock_test_flag(struct sock *sk, int nr) { return test_bit(nr, &hci_pi(sk)->flags); } unsigned short hci_sock_get_channel(struct sock *sk) { return hci_pi(sk)->channel; } u32 hci_sock_get_cookie(struct sock *sk) { return hci_pi(sk)->cookie; } static bool hci_sock_gen_cookie(struct sock *sk) { int id = hci_pi(sk)->cookie; if (!id) { id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL); if (id < 0) id = 0xffffffff; hci_pi(sk)->cookie = id; get_task_comm(hci_pi(sk)->comm, current); return true; } return false; } static void hci_sock_free_cookie(struct sock *sk) { int id = hci_pi(sk)->cookie; if (id) { hci_pi(sk)->cookie = 0xffffffff; ida_simple_remove(&sock_cookie_ida, id); } } static inline int hci_test_bit(int nr, const void *addr) { return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); } /* Security filter */ #define HCI_SFLT_MAX_OGF 5 struct hci_sec_filter { __u32 type_mask; __u32 event_mask[2]; __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; }; static const struct hci_sec_filter hci_sec_filter = { /* Packet types */ 0x10, /* Events */ { 0x1000d9fe, 0x0000b00c }, /* Commands */ { { 0x0 }, /* OGF_LINK_CTL */ { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, /* OGF_LINK_POLICY */ { 0x00005200, 0x00000000, 0x00000000, 0x00 }, /* OGF_HOST_CTL */ { 0xaab00200, 0x2b402aaa, 
0x05220154, 0x00 }, /* OGF_INFO_PARAM */ { 0x000002be, 0x00000000, 0x00000000, 0x00 }, /* OGF_STATUS_PARAM */ { 0x000000ea, 0x00000000, 0x00000000, 0x00 } } }; static struct bt_sock_list hci_sk_list = { .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) }; static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) { struct hci_filter *flt; int flt_type, flt_event; /* Apply filter */ flt = &hci_pi(sk)->filter; flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS; if (!test_bit(flt_type, &flt->type_mask)) return true; /* Extra filter for event packets only */ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT) return false; flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); if (!hci_test_bit(flt_event, &flt->event_mask)) return true; /* Check filter only when opcode is set */ if (!flt->opcode) return false; if (flt_event == HCI_EV_CMD_COMPLETE && flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) return true; if (flt_event == HCI_EV_CMD_STATUS && flt->opcode != get_unaligned((__le16 *)(skb->data + 4))) return true; return false; } /* Send frame to RAW socket */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) { struct sock *sk; struct sk_buff *skb_copy = NULL; BT_DBG("hdev %p len %d", hdev, skb->len); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) continue; /* Don't send frame to the socket it came from */ if (skb->sk == sk) continue; if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) continue; if (is_filtered_packet(sk, skb)) continue; } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { if (!bt_cb(skb)->incoming) continue; if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) continue; } else { /* Don't send frame to other channel types */ continue; } if (!skb_copy) { /* Create a private copy with headroom */ skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true); if (!skb_copy) continue; /* Put type byte before the data */ memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&hci_sk_list.lock); kfree_skb(skb_copy); } static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb) { struct scm_creds *creds; if (!sk || WARN_ON(!skb)) return; creds = &bt_cb(skb)->creds; /* Check if peer credentials is set */ if (!sk->sk_peer_pid) { /* Check if parent peer credentials is set */ if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid) sk = bt_sk(sk)->parent; else return; } /* Check if scm_creds already set */ if (creds->pid == pid_vnr(sk->sk_peer_pid)) return; memset(creds, 0, sizeof(*creds)); creds->pid = pid_vnr(sk->sk_peer_pid); if (sk->sk_peer_cred) { creds->uid = sk->sk_peer_cred->uid; creds->gid = sk->sk_peer_cred->gid; } } static struct sk_buff *hci_skb_clone(struct sk_buff *skb) { struct sk_buff *nskb; if (!skb) return NULL; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return NULL; hci_sock_copy_creds(skb->sk, nskb); return nskb; } /* Send frame to sockets with specific channel */ static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) { 
struct sock *sk; BT_DBG("channel %u len %d", channel, skb->len); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; /* Ignore socket without the flag set */ if (!hci_sock_test_flag(sk, flag)) continue; /* Skip the original socket */ if (sk == skip_sk) continue; if (sk->sk_state != BT_BOUND) continue; if (hci_pi(sk)->channel != channel) continue; nskb = hci_skb_clone(skb); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } } void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) { read_lock(&hci_sk_list.lock); __hci_send_to_channel(channel, skb, flag, skip_sk); read_unlock(&hci_sk_list.lock); } /* Send frame to monitor socket */ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) { struct sk_buff *skb_copy = NULL; struct hci_mon_hdr *hdr; __le16 opcode; if (!atomic_read(&monitor_promisc)) return; BT_DBG("hdev %p len %d", hdev, skb->len); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: opcode = cpu_to_le16(HCI_MON_COMMAND_PKT); break; case HCI_EVENT_PKT: opcode = cpu_to_le16(HCI_MON_EVENT_PKT); break; case HCI_ACLDATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT); else opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT); break; case HCI_SCODATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT); else opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT); break; case HCI_ISODATA_PKT: if (bt_cb(skb)->incoming) opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT); else opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT); break; case HCI_DIAG_PKT: opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); break; default: return; } /* Create a private copy with headroom */ skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true); if (!skb_copy) return; hci_sock_copy_creds(skb->sk, skb_copy); /* Put header before the data */ hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len); hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb_copy); } void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, void *data, u16 data_len, ktime_t tstamp, int flag, struct sock *skip_sk) { struct sock *sk; __le16 index; if (hdev) index = cpu_to_le16(hdev->id); else index = cpu_to_le16(MGMT_INDEX_NONE); read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct hci_mon_hdr *hdr; struct sk_buff *skb; if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL) continue; /* Ignore socket without the flag set */ if (!hci_sock_test_flag(sk, flag)) continue; /* Skip the original socket */ if (sk == skip_sk) continue; skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC); if (!skb) continue; put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(event, skb_put(skb, 2)); if (data) skb_put_data(skb, data, data_len); skb->tstamp = tstamp; hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT); hdr->index = index; hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } read_unlock(&hci_sk_list.lock); } static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) { struct hci_mon_hdr *hdr; struct hci_mon_new_index *ni; struct hci_mon_index_info *ii; struct sk_buff *skb; __le16 opcode; switch (event) { case HCI_DEV_REG: skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC); if (!skb) return NULL; ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE); ni->type = 
hdev->dev_type; ni->bus = hdev->bus; bacpy(&ni->bdaddr, &hdev->bdaddr); memcpy(ni->name, hdev->name, 8); opcode = cpu_to_le16(HCI_MON_NEW_INDEX); break; case HCI_DEV_UNREG: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_DEL_INDEX); break; case HCI_DEV_SETUP: if (hdev->manufacturer == 0xffff) return NULL; fallthrough; case HCI_DEV_UP: skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); if (!skb) return NULL; ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE); bacpy(&ii->bdaddr, &hdev->bdaddr); ii->manufacturer = cpu_to_le16(hdev->manufacturer); opcode = cpu_to_le16(HCI_MON_INDEX_INFO); break; case HCI_DEV_OPEN: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_OPEN_INDEX); break; case HCI_DEV_CLOSE: skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) return NULL; opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX); break; default: return NULL; } __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = opcode; hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_open(struct sock *sk) { struct hci_mon_hdr *hdr; struct sk_buff *skb; u16 format; u8 ver[3]; u32 flags; /* No message needed when cookie is not present */ if (!hci_pi(sk)->cookie) return NULL; switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: format = 0x0000; ver[0] = BT_SUBSYS_VERSION; put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); break; case HCI_CHANNEL_USER: format = 0x0001; ver[0] = BT_SUBSYS_VERSION; put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); break; case HCI_CHANNEL_CONTROL: format = 0x0002; mgmt_fill_version_info(ver); break; default: /* No message for unsupported format */ return NULL; } skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 
0x1 : 0x0; put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(format, skb_put(skb, 2)); skb_put_data(skb, ver, sizeof(ver)); put_unaligned_le32(flags, skb_put(skb, 4)); skb_put_u8(skb, TASK_COMM_LEN); skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN); if (hci_pi(sk)->hdev) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); else hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_close(struct sock *sk) { struct hci_mon_hdr *hdr; struct sk_buff *skb; /* No message needed when cookie is not present */ if (!hci_pi(sk)->cookie) return NULL; switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: case HCI_CHANNEL_CONTROL: break; default: /* No message for unsupported format */ return NULL; } skb = bt_skb_alloc(4, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE); if (hci_pi(sk)->hdev) hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); else hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index, u16 opcode, u16 len, const void *buf) { struct hci_mon_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(6 + len, GFP_ATOMIC); if (!skb) return NULL; hci_sock_copy_creds(sk, skb); put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); put_unaligned_le16(opcode, skb_put(skb, 2)); if (buf) skb_put_data(skb, buf, len); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } static void __printf(2, 3) send_monitor_note(struct sock *sk, const char *fmt, ...) 
{ size_t len; struct hci_mon_hdr *hdr; struct sk_buff *skb; va_list args; va_start(args, fmt); len = vsnprintf(NULL, 0, fmt, args); va_end(args); skb = bt_skb_alloc(len + 1, GFP_ATOMIC); if (!skb) return; hci_sock_copy_creds(sk, skb); va_start(args, fmt); vsprintf(skb_put(skb, len), fmt, args); *(u8 *)skb_put(skb, 1) = 0; va_end(args); __net_timestamp(skb); hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE); hdr->index = cpu_to_le16(HCI_DEV_NONE); hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); } static void send_monitor_replay(struct sock *sk) { struct hci_dev *hdev; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { struct sk_buff *skb; skb = create_monitor_event(hdev, HCI_DEV_REG); if (!skb) continue; if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); if (!test_bit(HCI_RUNNING, &hdev->flags)) continue; skb = create_monitor_event(hdev, HCI_DEV_OPEN); if (!skb) continue; if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); if (test_bit(HCI_UP, &hdev->flags)) skb = create_monitor_event(hdev, HCI_DEV_UP); else if (hci_dev_test_flag(hdev, HCI_SETUP)) skb = create_monitor_event(hdev, HCI_DEV_SETUP); else skb = NULL; if (skb) { if (sock_queue_rcv_skb(sk, skb)) kfree_skb(skb); } } read_unlock(&hci_dev_list_lock); } static void send_monitor_control_replay(struct sock *mon_sk) { struct sock *sk; read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *skb; skb = create_monitor_ctrl_open(sk); if (!skb) continue; if (sock_queue_rcv_skb(mon_sk, skb)) kfree_skb(skb); } read_unlock(&hci_sk_list.lock); } /* Generate internal stack event */ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) { struct hci_event_hdr *hdr; struct hci_ev_stack_internal *ev; struct sk_buff *skb; skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); if (!skb) return; hdr = skb_put(skb, HCI_EVENT_HDR_SIZE); hdr->evt = HCI_EV_STACK_INTERNAL; hdr->plen = sizeof(*ev) + dlen; ev = skb_put(skb, sizeof(*ev) + dlen); ev->type = type; memcpy(ev->data, data, dlen); bt_cb(skb)->incoming = 1; __net_timestamp(skb); hci_skb_pkt_type(skb) = HCI_EVENT_PKT; hci_send_to_sock(hdev, skb); kfree_skb(skb); } void hci_sock_dev_event(struct hci_dev *hdev, int event) { BT_DBG("hdev %s event %d", hdev->name, event); if (atomic_read(&monitor_promisc)) { struct sk_buff *skb; /* Send event to monitor */ skb = create_monitor_event(hdev, event); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } if (event <= HCI_DEV_DOWN) { struct hci_ev_si_device ev; /* Send event to sockets */ ev.event = event; ev.dev_id = hdev->id; hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); } if (event == HCI_DEV_UNREG) { struct sock *sk; /* Wake up sockets using this dead device */ read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { if (hci_pi(sk)->hdev == hdev) { sk->sk_err = EPIPE; sk->sk_state_change(sk); } } read_unlock(&hci_sk_list.lock); } } static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel) { struct hci_mgmt_chan *c; list_for_each_entry(c, &mgmt_chan_list, list) { if (c->channel == channel) return c; } return NULL; } static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel) { struct hci_mgmt_chan *c; mutex_lock(&mgmt_chan_list_lock); c = __hci_mgmt_chan_find(channel); mutex_unlock(&mgmt_chan_list_lock); return c; } int hci_mgmt_chan_register(struct hci_mgmt_chan *c) { if 
(c->channel < HCI_CHANNEL_CONTROL) return -EINVAL; mutex_lock(&mgmt_chan_list_lock); if (__hci_mgmt_chan_find(c->channel)) { mutex_unlock(&mgmt_chan_list_lock); return -EALREADY; } list_add_tail(&c->list, &mgmt_chan_list); mutex_unlock(&mgmt_chan_list_lock); return 0; } EXPORT_SYMBOL(hci_mgmt_chan_register); void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c) { mutex_lock(&mgmt_chan_list_lock); list_del(&c->list); mutex_unlock(&mgmt_chan_list_lock); } EXPORT_SYMBOL(hci_mgmt_chan_unregister); static int hci_sock_release(struct socket *sock) { struct sock *sk = sock->sk; struct hci_dev *hdev; struct sk_buff *skb; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_MONITOR: atomic_dec(&monitor_promisc); break; case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: case HCI_CHANNEL_CONTROL: /* Send event to monitor */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } hci_sock_free_cookie(sk); break; } bt_sock_unlink(&hci_sk_list, sk); hdev = hci_pi(sk)->hdev; if (hdev) { if (hci_pi(sk)->channel == HCI_CHANNEL_USER && !hci_dev_test_flag(hdev, HCI_UNREGISTER)) { /* When releasing a user channel exclusive access, * call hci_dev_do_close directly instead of calling * hci_dev_close to ensure the exclusive access will * be released and the controller brought back down. * * The checking of HCI_AUTO_OFF is not needed in this * case since it will have been cleared already when * opening the user channel. * * Make sure to also check that we haven't already * unregistered since all the cleanup will have already * been complete and hdev will get released when we put * below. */ hci_dev_do_close(hdev); hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); mgmt_index_added(hdev); } atomic_dec(&hdev->promisc); hci_dev_put(hdev); } sock_orphan(sk); release_sock(sk); sock_put(sk); return 0; } static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR); hci_dev_unlock(hdev); return err; } static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg) { bdaddr_t bdaddr; int err; if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) return -EFAULT; hci_dev_lock(hdev); err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR); hci_dev_unlock(hdev); return err; } /* Ioctls that require bound socket */ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { struct hci_dev *hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) return PTR_ERR(hdev); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return -EOPNOTSUPP; if (hdev->dev_type != HCI_PRIMARY) return -EOPNOTSUPP; switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) return -EPERM; return -EOPNOTSUPP; case HCIGETCONNINFO: return hci_get_conn_info(hdev, (void __user *)arg); case HCIGETAUTHINFO: return hci_get_auth_info(hdev, (void __user *)arg); case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_reject_list_add(hdev, (void __user *)arg); case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_sock_reject_list_del(hdev, (void __user *)arg); } return -ENOIOCTLCMD; } static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = (void 
__user *)arg; struct sock *sk = sock->sk; int err; BT_DBG("cmd %x arg %lx", cmd, arg); /* Make sure the cmd is valid before doing anything */ switch (cmd) { case HCIGETDEVLIST: case HCIGETDEVINFO: case HCIGETCONNLIST: case HCIDEVUP: case HCIDEVDOWN: case HCIDEVRESET: case HCIDEVRESTAT: case HCISETSCAN: case HCISETAUTH: case HCISETENCRYPT: case HCISETPTYPE: case HCISETLINKPOL: case HCISETLINKMODE: case HCISETACLMTU: case HCISETSCOMTU: case HCIINQUIRY: case HCISETRAW: case HCIGETCONNINFO: case HCIGETAUTHINFO: case HCIBLOCKADDR: case HCIUNBLOCKADDR: break; default: return -ENOIOCTLCMD; } lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } /* When calling an ioctl on an unbound raw socket, then ensure * that the monitor gets informed. Ensure that the resulting event * is only send once by checking if the cookie exists or not. The * socket cookie will be only ever generated once for the lifetime * of a given socket. */ if (hci_sock_gen_cookie(sk)) { struct sk_buff *skb; /* Perform careful checks before setting the HCI_SOCK_TRUSTED * flag. Make sure that not only the current task but also * the socket opener has the required capability, since * privileged programs can be tricked into making ioctl calls * on HCI sockets, and the socket should not be marked as * trusted simply because the ioctl caller is privileged. */ if (sk_capable(sk, CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } release_sock(sk); switch (cmd) { case HCIGETDEVLIST: return hci_get_dev_list(argp); case HCIGETDEVINFO: return hci_get_dev_info(argp); case HCIGETCONNLIST: return hci_get_conn_list(argp); case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_open(arg); case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_close(arg); case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset(arg); case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_reset_stat(arg); case HCISETSCAN: case HCISETAUTH: case HCISETENCRYPT: case HCISETPTYPE: case HCISETLINKPOL: case HCISETLINKMODE: case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) return -EPERM; return hci_dev_cmd(cmd, argp); case HCIINQUIRY: return hci_inquiry(argp); } lock_sock(sk); err = hci_sock_bound_ioctl(sk, cmd, arg); done: release_sock(sk); return err; } #ifdef CONFIG_COMPAT static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { switch (cmd) { case HCIDEVUP: case HCIDEVDOWN: case HCIDEVRESET: case HCIDEVRESTAT: return hci_sock_ioctl(sock, cmd, arg); } return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } #endif static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_hci haddr; struct sock *sk = sock->sk; struct hci_dev *hdev = NULL; struct sk_buff *skb; int len, err = 0; BT_DBG("sock %p sk %p", sock, sk); if (!addr) return -EINVAL; memset(&haddr, 0, sizeof(haddr)); len = min_t(unsigned int, sizeof(haddr), addr_len); memcpy(&haddr, addr, len); if (haddr.hci_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); /* Allow detaching from dead device and attaching to alive device, if * the caller wants to re-bind (instead of close) this socket in * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. 
*/ hdev = hci_pi(sk)->hdev; if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { hci_pi(sk)->hdev = NULL; sk->sk_state = BT_OPEN; hci_dev_put(hdev); } hdev = NULL; if (sk->sk_state == BT_BOUND) { err = -EALREADY; goto done; } switch (haddr.hci_channel) { case HCI_CHANNEL_RAW: if (hci_pi(sk)->hdev) { err = -EALREADY; goto done; } if (haddr.hci_dev != HCI_DEV_NONE) { hdev = hci_dev_get(haddr.hci_dev); if (!hdev) { err = -ENODEV; goto done; } atomic_inc(&hdev->promisc); } hci_pi(sk)->channel = haddr.hci_channel; if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been assigned, * then there has been already an ioctl issued against * an unbound socket and with that triggered an open * notification. Send a close notification first to * allow the state transition to bounded. */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } if (capable(CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->hdev = hdev; /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } break; case HCI_CHANNEL_USER: if (hci_pi(sk)->hdev) { err = -EALREADY; goto done; } if (haddr.hci_dev == HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_ADMIN)) { err = -EPERM; goto done; } hdev = hci_dev_get(haddr.hci_dev); if (!hdev) { err = -ENODEV; goto done; } if (test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && test_bit(HCI_UP, &hdev->flags))) { err = -EBUSY; hci_dev_put(hdev); goto done; } if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) { err = -EUSERS; hci_dev_put(hdev); goto done; } mgmt_index_removed(hdev); err = hci_dev_open(hdev->id); if (err) { if (err == -EALREADY) { /* In case the transport is already up and * running, clear the error here. * * This can happen when opening a user * channel and HCI_AUTO_OFF grace period * is still active. */ err = 0; } else { hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); mgmt_index_added(hdev); hci_dev_put(hdev); goto done; } } hci_pi(sk)->channel = haddr.hci_channel; if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been assigned, * this socket will transition from a raw socket into * a user channel socket. For a clean transition, send * the close notification first. */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } /* The user channel is restricted to CAP_NET_ADMIN * capabilities and with that implicitly trusted. */ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->hdev = hdev; /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } atomic_inc(&hdev->promisc); break; case HCI_CHANNEL_MONITOR: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_RAW)) { err = -EPERM; goto done; } hci_pi(sk)->channel = haddr.hci_channel; /* The monitor interface is restricted to CAP_NET_RAW * capabilities and with that implicitly trusted. 
*/ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); send_monitor_note(sk, "Linux version %s (%s)", init_utsname()->release, init_utsname()->machine); send_monitor_note(sk, "Bluetooth subsystem version %u.%u", BT_SUBSYS_VERSION, BT_SUBSYS_REVISION); send_monitor_replay(sk); send_monitor_control_replay(sk); atomic_inc(&monitor_promisc); break; case HCI_CHANNEL_LOGGING: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } if (!capable(CAP_NET_ADMIN)) { err = -EPERM; goto done; } hci_pi(sk)->channel = haddr.hci_channel; break; default: if (!hci_mgmt_chan_find(haddr.hci_channel)) { err = -EINVAL; goto done; } if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } /* Users with CAP_NET_ADMIN capabilities are allowed * access to all management commands and events. For * untrusted users the interface is restricted and * also only untrusted events are sent. */ if (capable(CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); hci_pi(sk)->channel = haddr.hci_channel; /* At the moment the index and unconfigured index events * are enabled unconditionally. Setting them on each * socket when binding keeps this functionality. They * however might be cleared later and then sending of these * events will be disabled, but that is then intentional. * * This also enables generic events that are safe to be * received by untrusted users. Example for such events * are changes to settings, class of device, name etc. */ if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) { if (!hci_sock_gen_cookie(sk)) { /* In the case when a cookie has already been * assigned, this socket will transition from * a raw socket into a control socket. To * allow for a clean transition, send the * close notification first. */ skb = create_monitor_ctrl_close(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } /* Send event to monitor */ skb = create_monitor_ctrl_open(sk); if (skb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); } break; } /* Default MTU to HCI_MAX_FRAME_SIZE if not set */ if (!hci_pi(sk)->mtu) hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE; sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr; struct sock *sk = sock->sk; struct hci_dev *hdev; int err = 0; BT_DBG("sock %p sk %p", sock, sk); if (peer) return -EOPNOTSUPP; lock_sock(sk); hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) { err = PTR_ERR(hdev); goto done; } haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; haddr->hci_channel= hci_pi(sk)->channel; err = sizeof(*haddr); done: release_sock(sk); return err; } static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { __u8 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); } if (mask & HCI_CMSG_TSTAMP) { #ifdef CONFIG_COMPAT struct old_timeval32 ctv; #endif struct __kernel_old_timeval tv; void *data; int len; skb_get_timestamp(skb, &tv); data = &tv; len = sizeof(tv); #ifdef CONFIG_COMPAT if (!COMPAT_USE_64BIT_TIME && (msg->msg_flags & 
MSG_CMSG_COMPAT)) { ctv.tv_sec = tv.tv_sec; ctv.tv_usec = tv.tv_usec; data = &ctv; len = sizeof(ctv); } #endif put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); } } static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct scm_cookie scm; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; unsigned int skblen; BT_DBG("sock %p, sk %p", sock, sk); if (flags & MSG_OOB) return -EOPNOTSUPP; if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING) return -EOPNOTSUPP; if (sk->sk_state == BT_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; skblen = skb->len; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: hci_sock_cmsg(sk, msg, skb); break; case HCI_CHANNEL_USER: case HCI_CHANNEL_MONITOR: sock_recv_timestamp(msg, sk, skb); break; default: if (hci_mgmt_chan_find(hci_pi(sk)->channel)) sock_recv_timestamp(msg, sk, skb); break; } memset(&scm, 0, sizeof(scm)); scm.creds = bt_cb(skb)->creds; skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; scm_recv(sock, msg, &scm, flags); return err ? : copied; } static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, struct sk_buff *skb) { u8 *cp; struct mgmt_hdr *hdr; u16 opcode, index, len; struct hci_dev *hdev = NULL; const struct hci_mgmt_handler *handler; bool var_len, no_hdev; int err; BT_DBG("got %d bytes", skb->len); if (skb->len < sizeof(*hdr)) return -EINVAL; hdr = (void *)skb->data; opcode = __le16_to_cpu(hdr->opcode); index = __le16_to_cpu(hdr->index); len = __le16_to_cpu(hdr->len); if (len != skb->len - sizeof(*hdr)) { err = -EINVAL; goto done; } if (chan->channel == HCI_CHANNEL_CONTROL) { struct sk_buff *cmd; /* Send event to monitor */ cmd = create_monitor_ctrl_command(sk, index, opcode, len, skb->data + sizeof(*hdr)); if (cmd) { hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd, HCI_SOCK_TRUSTED, NULL); kfree_skb(cmd); } } if (opcode >= chan->handler_count || chan->handlers[opcode].func == NULL) { BT_DBG("Unknown op %u", opcode); err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_UNKNOWN_COMMAND); goto done; } handler = &chan->handlers[opcode]; if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && !(handler->flags & HCI_MGMT_UNTRUSTED)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_PERMISSION_DENIED); goto done; } if (index != MGMT_INDEX_NONE) { hdev = hci_dev_get(index); if (!hdev) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !(handler->flags & HCI_MGMT_UNCONFIGURED)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } } if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) { no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); if (no_hdev != !hdev) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; } } var_len = (handler->flags & HCI_MGMT_VAR_LEN); if ((var_len && len < handler->data_len) || (!var_len && len != handler->data_len)) { err = mgmt_cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_PARAMS); goto done; } if (hdev && chan->hdev_init) chan->hdev_init(sk, hdev); cp = skb->data + sizeof(*hdr); err = handler->func(sk, 
hdev, cp, len); if (err < 0) goto done; err = skb->len; done: if (hdev) hci_dev_put(hdev); return err; } static int hci_logging_frame(struct sock *sk, struct sk_buff *skb, unsigned int flags) { struct hci_mon_hdr *hdr; struct hci_dev *hdev; u16 index; int err; /* The logging frame consists at minimum of the standard header, * the priority byte, the ident length byte and at least one string * terminator NUL byte. Anything shorter are invalid packets. */ if (skb->len < sizeof(*hdr) + 3) return -EINVAL; hdr = (void *)skb->data; if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr)) return -EINVAL; if (__le16_to_cpu(hdr->opcode) == 0x0000) { __u8 priority = skb->data[sizeof(*hdr)]; __u8 ident_len = skb->data[sizeof(*hdr) + 1]; /* Only the priorities 0-7 are valid and with that any other * value results in an invalid packet. * * The priority byte is followed by an ident length byte and * the NUL terminated ident string. Check that the ident * length is not overflowing the packet and also that the * ident string itself is NUL terminated. In case the ident * length is zero, the length value actually doubles as NUL * terminator identifier. * * The message follows the ident string (if present) and * must be NUL terminated. Otherwise it is not a valid packet. */ if (priority > 7 || skb->data[skb->len - 1] != 0x00 || ident_len > skb->len - sizeof(*hdr) - 3 || skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) return -EINVAL; } else { return -EINVAL; } index = __le16_to_cpu(hdr->index); if (index != MGMT_INDEX_NONE) { hdev = hci_dev_get(index); if (!hdev) return -ENODEV; } else { hdev = NULL; } hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING); hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); err = skb->len; if (hdev) hci_dev_put(hdev); return err; } static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct hci_mgmt_chan *chan; struct hci_dev *hdev; struct sk_buff *skb; int err; const unsigned int flags = msg->msg_flags; BT_DBG("sock %p sk %p", sock, sk); if (flags & MSG_OOB) return -EOPNOTSUPP; if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) return -EINVAL; if (len < 4 || len > hci_pi(sk)->mtu) return -EINVAL; skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); if (IS_ERR(skb)) return PTR_ERR(skb); lock_sock(sk); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: break; case HCI_CHANNEL_MONITOR: err = -EOPNOTSUPP; goto drop; case HCI_CHANNEL_LOGGING: err = hci_logging_frame(sk, skb, flags); goto drop; default: mutex_lock(&mgmt_chan_list_lock); chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); if (chan) err = hci_mgmt_cmd(chan, sk, skb); else err = -EINVAL; mutex_unlock(&mgmt_chan_list_lock); goto drop; } hdev = hci_hdev_from_sock(sk); if (IS_ERR(hdev)) { err = PTR_ERR(hdev); goto drop; } if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto drop; } hci_skb_pkt_type(skb) = skb->data[0]; skb_pull(skb, 1); if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { /* No permission check is needed for user channel * since that gets enforced when binding the socket. * * However check that the packet type is valid. 
*/ if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { err = -EINVAL; goto drop; } skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { u16 opcode = get_unaligned_le16(skb->data); u16 ogf = hci_opcode_ogf(opcode); u16 ocf = hci_opcode_ocf(opcode); if (((ogf > HCI_SFLT_MAX_OGF) || !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && !capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } /* Since the opcode has already been extracted here, store * a copy of the value for later use by the drivers. */ hci_skb_opcode(skb) = opcode; if (ogf == 0x3f) { skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } else { /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } } else { if (!capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { err = -EINVAL; goto drop; } skb_queue_tail(&hdev->raw_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } err = len; done: release_sock(sk); return err; drop: kfree_skb(skb); goto done; } static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; int err = 0, opt = 0; BT_DBG("sk %p, opt %d", sk, optname); lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } switch (optname) { case HCI_DATA_DIR: if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; break; case HCI_TIME_STAMP: if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } if (opt) hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; else hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_from_sockptr(&uf, optval, len)) { err = -EFAULT; break; } if (!capable(CAP_NET_RAW)) { uf.type_mask &= hci_sec_filter.type_mask; uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); } { struct hci_filter *f = &hci_pi(sk)->filter; f->type_mask = uf.type_mask; f->opcode = uf.opcode; *((u32 *) f->event_mask + 0) = uf.event_mask[0]; *((u32 *) f->event_mask + 1) = uf.event_mask[1]; } break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int len) { struct sock *sk = sock->sk; int err = 0; u16 opt; BT_DBG("sk %p, opt %d", sk, optname); if (level == SOL_HCI) return hci_sock_setsockopt_old(sock, level, optname, optval, len); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SNDMTU: case BT_RCVMTU: switch (hci_pi(sk)->channel) { /* Don't allow changing MTU for channels that are meant for HCI * traffic only. 
*/ case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: err = -ENOPROTOOPT; goto done; } if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } hci_pi(sk)->mtu = opt; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; int len, opt, err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EBADFD; goto done; } switch (optname) { case HCI_DATA_DIR: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_TIME_STAMP: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_to_user(optval, &uf, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (level == SOL_HCI) return hci_sock_getsockopt_old(sock, level, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SNDMTU: case BT_RCVMTU: if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static void hci_sock_destruct(struct sock *sk) { mgmt_cleanup(sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static const struct proto_ops hci_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = hci_sock_release, .bind = hci_sock_bind, .getname = hci_sock_getname, .sendmsg = hci_sock_sendmsg, .recvmsg = hci_sock_recvmsg, .ioctl = hci_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hci_sock_compat_ioctl, #endif .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = hci_sock_setsockopt, .getsockopt = hci_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto hci_sk_proto = { .name = "HCI", .owner = THIS_MODULE, .obj_size = sizeof(struct hci_pinfo) }; static int hci_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &hci_sock_ops; sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sock->state = SS_UNCONNECTED; sk->sk_destruct = hci_sock_destruct; bt_sock_link(&hci_sk_list, sk); return 0; } static const struct net_proto_family hci_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = hci_sock_create, }; int __init hci_sock_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr)); err = proto_register(&hci_sk_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); if (err < 0) { 
BT_ERR("HCI socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HCI proc file"); bt_sock_unregister(BTPROTO_HCI); goto error; } BT_INFO("HCI socket layer initialized"); return 0; error: proto_unregister(&hci_sk_proto); return err; } void hci_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "hci"); bt_sock_unregister(BTPROTO_HCI); proto_unregister(&hci_sk_proto); }
linux-master
net/bluetooth/hci_sock.c
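Below is a minimal userspace sketch, not part of the kernel tree, showing the raw HCI channel that hci_sock_bind() and hci_sock_setsockopt_old() above implement. It assumes the BlueZ headers <bluetooth/bluetooth.h> and <bluetooth/hci.h> for struct sockaddr_hci, struct hci_filter, the hci_filter_*() helpers and the SOL_HCI/HCI_FILTER constants.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct sockaddr_hci addr;
	struct hci_filter flt;
	unsigned char buf[260];	/* large enough for one HCI event frame */
	ssize_t len;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Bind to device 0 on the raw channel (handled by hci_sock_bind()) */
	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = 0;
	addr.hci_channel = HCI_CHANNEL_RAW;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}

	/* Only pass HCI event packets up to this socket
	 * (hci_sock_setsockopt_old() stores this filter per socket).
	 */
	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
	hci_filter_all_events(&flt);
	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) {
		perror("setsockopt");
		close(fd);
		return 1;
	}

	/* Each delivered frame starts with the packet type byte that
	 * hci_send_to_sock() pushes in front of the data.
	 */
	len = read(fd, buf, sizeof(buf));
	if (len > 0)
		printf("got %zd bytes, packet type 0x%02x\n", len, buf[0]);

	close(fd);
	return 0;
}

Note that callers without CAP_NET_RAW are still constrained by the hci_sec_filter table above, so only the safe subset of commands and events listed there is reachable from an unprivileged raw socket.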
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 Google Corporation */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "mgmt_util.h" #include "msft.h" #define MSFT_RSSI_THRESHOLD_VALUE_MIN -127 #define MSFT_RSSI_THRESHOLD_VALUE_MAX 20 #define MSFT_RSSI_LOW_TIMEOUT_MAX 0x3C #define MSFT_OP_READ_SUPPORTED_FEATURES 0x00 struct msft_cp_read_supported_features { __u8 sub_opcode; } __packed; struct msft_rp_read_supported_features { __u8 status; __u8 sub_opcode; __le64 features; __u8 evt_prefix_len; __u8 evt_prefix[]; } __packed; #define MSFT_OP_LE_MONITOR_ADVERTISEMENT 0x03 #define MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN 0x01 struct msft_le_monitor_advertisement_pattern { __u8 length; __u8 data_type; __u8 start_byte; __u8 pattern[]; }; struct msft_le_monitor_advertisement_pattern_data { __u8 count; __u8 data[]; }; struct msft_cp_le_monitor_advertisement { __u8 sub_opcode; __s8 rssi_high; __s8 rssi_low; __u8 rssi_low_interval; __u8 rssi_sampling_period; __u8 cond_type; __u8 data[]; } __packed; struct msft_rp_le_monitor_advertisement { __u8 status; __u8 sub_opcode; __u8 handle; } __packed; #define MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT 0x04 struct msft_cp_le_cancel_monitor_advertisement { __u8 sub_opcode; __u8 handle; } __packed; struct msft_rp_le_cancel_monitor_advertisement { __u8 status; __u8 sub_opcode; } __packed; #define MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE 0x05 struct msft_cp_le_set_advertisement_filter_enable { __u8 sub_opcode; __u8 enable; } __packed; struct msft_rp_le_set_advertisement_filter_enable { __u8 status; __u8 sub_opcode; } __packed; #define MSFT_EV_LE_MONITOR_DEVICE 0x02 struct msft_ev_le_monitor_device { __u8 addr_type; bdaddr_t bdaddr; __u8 monitor_handle; __u8 monitor_state; } __packed; struct msft_monitor_advertisement_handle_data { __u8 msft_handle; __u16 mgmt_handle; __s8 rssi_high; __s8 rssi_low; __u8 rssi_low_interval; __u8 rssi_sampling_period; __u8 cond_type; struct list_head list; }; enum monitor_addr_filter_state { AF_STATE_IDLE, AF_STATE_ADDING, AF_STATE_ADDED, AF_STATE_REMOVING, }; #define MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR 0x04 struct msft_monitor_addr_filter_data { __u8 msft_handle; __u8 pattern_handle; /* address filters pertain to */ __u16 mgmt_handle; int state; __s8 rssi_high; __s8 rssi_low; __u8 rssi_low_interval; __u8 rssi_sampling_period; __u8 addr_type; bdaddr_t bdaddr; struct list_head list; }; struct msft_data { __u64 features; __u8 evt_prefix_len; __u8 *evt_prefix; struct list_head handle_map; struct list_head address_filters; __u8 resuming; __u8 suspending; __u8 filter_enabled; /* To synchronize add/remove address filter and monitor device event.*/ struct mutex filter_lock; }; bool msft_monitor_supported(struct hci_dev *hdev) { return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR); } static bool read_supported_features(struct hci_dev *hdev, struct msft_data *msft) { struct msft_cp_read_supported_features cp; struct msft_rp_read_supported_features *rp; struct sk_buff *skb; cp.sub_opcode = MSFT_OP_READ_SUPPORTED_FEATURES; skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { if (!skb) skb = ERR_PTR(-EIO); bt_dev_err(hdev, "Failed to read MSFT supported features (%ld)", PTR_ERR(skb)); return false; } if (skb->len < sizeof(*rp)) { bt_dev_err(hdev, "MSFT supported features length mismatch"); goto failed; } rp = (struct msft_rp_read_supported_features *)skb->data; if 
(rp->sub_opcode != MSFT_OP_READ_SUPPORTED_FEATURES) goto failed; if (rp->evt_prefix_len > 0) { msft->evt_prefix = kmemdup(rp->evt_prefix, rp->evt_prefix_len, GFP_KERNEL); if (!msft->evt_prefix) goto failed; } msft->evt_prefix_len = rp->evt_prefix_len; msft->features = __le64_to_cpu(rp->features); if (msft->features & MSFT_FEATURE_MASK_CURVE_VALIDITY) hdev->msft_curve_validity = true; kfree_skb(skb); return true; failed: kfree_skb(skb); return false; } /* is_mgmt = true matches the handle exposed to userspace via mgmt. * is_mgmt = false matches the handle used by the msft controller. * This function requires the caller holds hdev->lock */ static struct msft_monitor_advertisement_handle_data *msft_find_handle_data (struct hci_dev *hdev, u16 handle, bool is_mgmt) { struct msft_monitor_advertisement_handle_data *entry; struct msft_data *msft = hdev->msft_data; list_for_each_entry(entry, &msft->handle_map, list) { if (is_mgmt && entry->mgmt_handle == handle) return entry; if (!is_mgmt && entry->msft_handle == handle) return entry; } return NULL; } /* This function requires the caller holds msft->filter_lock */ static struct msft_monitor_addr_filter_data *msft_find_address_data (struct hci_dev *hdev, u8 addr_type, bdaddr_t *addr, u8 pattern_handle) { struct msft_monitor_addr_filter_data *entry; struct msft_data *msft = hdev->msft_data; list_for_each_entry(entry, &msft->address_filters, list) { if (entry->pattern_handle == pattern_handle && addr_type == entry->addr_type && !bacmp(addr, &entry->bdaddr)) return entry; } return NULL; } /* This function requires the caller holds hdev->lock */ static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle, bdaddr_t *bdaddr, __u8 addr_type, bool notify) { struct monitored_device *dev, *tmp; int count = 0; list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { /* mgmt_handle == 0 indicates remove all devices, whereas, * bdaddr == NULL indicates remove all devices matching the * mgmt_handle. 
*/ if ((!mgmt_handle || dev->handle == mgmt_handle) && (!bdaddr || (!bacmp(bdaddr, &dev->bdaddr) && addr_type == dev->addr_type))) { if (notify && dev->notified) { mgmt_adv_monitor_device_lost(hdev, dev->handle, &dev->bdaddr, dev->addr_type); } list_del(&dev->list); kfree(dev); count++; } } return count; } static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode, struct adv_monitor *monitor, struct sk_buff *skb) { struct msft_rp_le_monitor_advertisement *rp; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; int status = 0; hci_dev_lock(hdev); rp = (struct msft_rp_le_monitor_advertisement *)skb->data; if (skb->len < sizeof(*rp)) { status = HCI_ERROR_UNSPECIFIED; goto unlock; } status = rp->status; if (status) goto unlock; handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); if (!handle_data) { status = HCI_ERROR_UNSPECIFIED; goto unlock; } handle_data->mgmt_handle = monitor->handle; handle_data->msft_handle = rp->handle; handle_data->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; INIT_LIST_HEAD(&handle_data->list); list_add(&handle_data->list, &msft->handle_map); monitor->state = ADV_MONITOR_STATE_OFFLOADED; unlock: if (status) hci_free_adv_monitor(hdev, monitor); hci_dev_unlock(hdev); return status; } /* This function requires the caller holds hci_req_sync_lock */ static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle) { struct msft_monitor_addr_filter_data *address_filter, *n; struct msft_cp_le_cancel_monitor_advertisement cp; struct msft_data *msft = hdev->msft_data; struct list_head head; struct sk_buff *skb; INIT_LIST_HEAD(&head); /* Cancel all corresponding address monitors */ mutex_lock(&msft->filter_lock); list_for_each_entry_safe(address_filter, n, &msft->address_filters, list) { if (address_filter->pattern_handle != handle) continue; list_del(&address_filter->list); /* Keep the address filter and let * msft_add_address_filter_sync() remove and free the address * filter. 
*/ if (address_filter->state == AF_STATE_ADDING) { address_filter->state = AF_STATE_REMOVING; continue; } /* Keep the address filter and let * msft_cancel_address_filter_sync() remove and free the address * filter */ if (address_filter->state == AF_STATE_REMOVING) continue; list_add_tail(&address_filter->list, &head); } mutex_unlock(&msft->filter_lock); list_for_each_entry_safe(address_filter, n, &head, list) { list_del(&address_filter->list); cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; cp.handle = address_filter->msft_handle; skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { kfree(address_filter); continue; } kfree_skb(skb); bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter", &address_filter->bdaddr); kfree(address_filter); } } static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode, struct adv_monitor *monitor, struct sk_buff *skb) { struct msft_rp_le_cancel_monitor_advertisement *rp; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; int status = 0; u8 msft_handle; rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; if (skb->len < sizeof(*rp)) { status = HCI_ERROR_UNSPECIFIED; goto done; } status = rp->status; if (status) goto done; hci_dev_lock(hdev); handle_data = msft_find_handle_data(hdev, monitor->handle, true); if (handle_data) { if (monitor->state == ADV_MONITOR_STATE_OFFLOADED) monitor->state = ADV_MONITOR_STATE_REGISTERED; /* Do not free the monitor if it is being removed due to * suspend. It will be re-monitored on resume. */ if (!msft->suspending) { hci_free_adv_monitor(hdev, monitor); /* Clear any monitored devices by this Adv Monitor */ msft_monitor_device_del(hdev, handle_data->mgmt_handle, NULL, 0, false); } msft_handle = handle_data->msft_handle; list_del(&handle_data->list); kfree(handle_data); hci_dev_unlock(hdev); msft_remove_addr_filters_sync(hdev, msft_handle); } else { hci_dev_unlock(hdev); } done: return status; } /* This function requires the caller holds hci_req_sync_lock */ static int msft_remove_monitor_sync(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_cp_le_cancel_monitor_advertisement cp; struct msft_monitor_advertisement_handle_data *handle_data; struct sk_buff *skb; handle_data = msft_find_handle_data(hdev, monitor->handle, true); /* If no matched handle, just remove without telling controller */ if (!handle_data) return -ENOENT; cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; cp.handle = handle_data->msft_handle; skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { if (!skb) return -EIO; return PTR_ERR(skb); } return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode, monitor, skb); } /* This function requires the caller holds hci_req_sync_lock */ int msft_suspend_sync(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; struct adv_monitor *monitor; int handle = 0; if (!msft || !msft_monitor_supported(hdev)) return 0; msft->suspending = true; while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); if (!monitor) break; msft_remove_monitor_sync(hdev, monitor); handle++; } /* All monitors have been removed */ msft->suspending = false; return 0; } static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) { struct adv_rssi_thresholds *r = &monitor->rssi; if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX 
|| r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) return false; /* High_threshold_timeout is not supported, * once high_threshold is reached, events are immediately reported. */ if (r->high_threshold_timeout != 0) return false; if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) return false; /* Sampling period from 0x00 to 0xFF are all allowed */ return true; } static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) { return msft_monitor_rssi_valid(monitor); /* No additional check needed for pattern-based monitor */ } static int msft_add_monitor_sync(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_cp_le_monitor_advertisement *cp; struct msft_le_monitor_advertisement_pattern_data *pattern_data; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_le_monitor_advertisement_pattern *pattern; struct adv_pattern *entry; size_t total_size = sizeof(*cp) + sizeof(*pattern_data); ptrdiff_t offset = 0; u8 pattern_count = 0; struct sk_buff *skb; int err; if (!msft_monitor_pattern_valid(monitor)) return -EINVAL; list_for_each_entry(entry, &monitor->patterns, list) { pattern_count++; total_size += sizeof(*pattern) + entry->length; } cp = kmalloc(total_size, GFP_KERNEL); if (!cp) return -ENOMEM; cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; cp->rssi_high = monitor->rssi.high_threshold; cp->rssi_low = monitor->rssi.low_threshold; cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; cp->rssi_sampling_period = monitor->rssi.sampling_period; cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; pattern_data = (void *)cp->data; pattern_data->count = pattern_count; list_for_each_entry(entry, &monitor->patterns, list) { pattern = (void *)(pattern_data->data + offset); /* the length also includes data_type and offset */ pattern->length = entry->length + 2; pattern->data_type = entry->ad_type; pattern->start_byte = entry->offset; memcpy(pattern->pattern, entry->value, entry->length); offset += sizeof(*pattern) + entry->length; } skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { err = PTR_ERR(skb); goto out_free; } err = msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode, monitor, skb); if (err) goto out_free; handle_data = msft_find_handle_data(hdev, monitor->handle, true); if (!handle_data) { err = -ENODATA; goto out_free; } handle_data->rssi_high = cp->rssi_high; handle_data->rssi_low = cp->rssi_low; handle_data->rssi_low_interval = cp->rssi_low_interval; handle_data->rssi_sampling_period = cp->rssi_sampling_period; out_free: kfree(cp); return err; } /* This function requires the caller holds hci_req_sync_lock */ static void reregister_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; struct msft_data *msft = hdev->msft_data; int handle = 0; if (!msft) return; msft->resuming = true; while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); if (!monitor) break; msft_add_monitor_sync(hdev, monitor); handle++; } /* All monitors have been reregistered */ msft->resuming = false; } /* This function requires the caller holds hci_req_sync_lock */ int msft_resume_sync(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; if (!msft || !msft_monitor_supported(hdev)) return 0; hci_dev_lock(hdev); /* Clear already tracked devices on resume. Once the monitors are * reregistered, devices in range will be found again after resume. 
*/ hdev->advmon_pend_notify = false; msft_monitor_device_del(hdev, 0, NULL, 0, true); hci_dev_unlock(hdev); reregister_monitor(hdev); return 0; } /* This function requires the caller holds hci_req_sync_lock */ void msft_do_open(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; if (hdev->msft_opcode == HCI_OP_NOP) return; if (!msft) { bt_dev_err(hdev, "MSFT extension not registered"); return; } bt_dev_dbg(hdev, "Initialize MSFT extension"); /* Reset existing MSFT data before re-reading */ kfree(msft->evt_prefix); msft->evt_prefix = NULL; msft->evt_prefix_len = 0; msft->features = 0; if (!read_supported_features(hdev, msft)) { hdev->msft_data = NULL; kfree(msft); return; } if (msft_monitor_supported(hdev)) { msft->resuming = true; msft_set_filter_enable(hdev, true); /* Monitors get removed on power off, so we need to explicitly * tell the controller to re-monitor. */ reregister_monitor(hdev); } } void msft_do_close(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; struct msft_monitor_advertisement_handle_data *handle_data, *tmp; struct msft_monitor_addr_filter_data *address_filter, *n; struct adv_monitor *monitor; if (!msft) return; bt_dev_dbg(hdev, "Cleanup of MSFT extension"); /* The controller will silently remove all monitors on power off. * Therefore, remove handle_data mapping and reset monitor state. */ list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) { monitor = idr_find(&hdev->adv_monitors_idr, handle_data->mgmt_handle); if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) monitor->state = ADV_MONITOR_STATE_REGISTERED; list_del(&handle_data->list); kfree(handle_data); } mutex_lock(&msft->filter_lock); list_for_each_entry_safe(address_filter, n, &msft->address_filters, list) { list_del(&address_filter->list); kfree(address_filter); } mutex_unlock(&msft->filter_lock); hci_dev_lock(hdev); /* Clear any devices that are being monitored and notify device lost */ hdev->advmon_pend_notify = false; msft_monitor_device_del(hdev, 0, NULL, 0, true); hci_dev_unlock(hdev); } static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data) { struct msft_monitor_addr_filter_data *address_filter = data; struct msft_cp_le_cancel_monitor_advertisement cp; struct msft_data *msft = hdev->msft_data; struct sk_buff *skb; int err = 0; if (!msft) { bt_dev_err(hdev, "MSFT: msft data is freed"); return -EINVAL; } /* The address filter has been removed by hci dev close */ if (!test_bit(HCI_UP, &hdev->flags)) return 0; mutex_lock(&msft->filter_lock); list_del(&address_filter->list); mutex_unlock(&msft->filter_lock); cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; cp.handle = address_filter->msft_handle; skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter", &address_filter->bdaddr); err = -EIO; goto done; } kfree_skb(skb); bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter", &address_filter->bdaddr); done: kfree(address_filter); return err; } void msft_register(struct hci_dev *hdev) { struct msft_data *msft = NULL; bt_dev_dbg(hdev, "Register MSFT extension"); msft = kzalloc(sizeof(*msft), GFP_KERNEL); if (!msft) { bt_dev_err(hdev, "Failed to register MSFT extension"); return; } INIT_LIST_HEAD(&msft->handle_map); INIT_LIST_HEAD(&msft->address_filters); hdev->msft_data = msft; mutex_init(&msft->filter_lock); } void msft_unregister(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; if (!msft) 
return; bt_dev_dbg(hdev, "Unregister MSFT extension"); hdev->msft_data = NULL; kfree(msft->evt_prefix); mutex_destroy(&msft->filter_lock); kfree(msft); } /* This function requires the caller holds hdev->lock */ static void msft_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 addr_type, __u16 mgmt_handle) { struct monitored_device *dev; dev = kmalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { bt_dev_err(hdev, "MSFT vendor event %u: no memory", MSFT_EV_LE_MONITOR_DEVICE); return; } bacpy(&dev->bdaddr, bdaddr); dev->addr_type = addr_type; dev->handle = mgmt_handle; dev->notified = false; INIT_LIST_HEAD(&dev->list); list_add(&dev->list, &hdev->monitored_devices); hdev->advmon_pend_notify = true; } /* This function requires the caller holds hdev->lock */ static void msft_device_lost(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 addr_type, __u16 mgmt_handle) { if (!msft_monitor_device_del(hdev, mgmt_handle, bdaddr, addr_type, true)) { bt_dev_err(hdev, "MSFT vendor event %u: dev %pMR not in list", MSFT_EV_LE_MONITOR_DEVICE, bdaddr); } } static void *msft_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, u8 ev, size_t len) { void *data; data = skb_pull_data(skb, len); if (!data) bt_dev_err(hdev, "Malformed MSFT vendor event: 0x%02x", ev); return data; } static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data) { struct msft_monitor_addr_filter_data *address_filter = data; struct msft_rp_le_monitor_advertisement *rp; struct msft_cp_le_monitor_advertisement *cp; struct msft_data *msft = hdev->msft_data; struct sk_buff *skb = NULL; bool remove = false; size_t size; if (!msft) { bt_dev_err(hdev, "MSFT: msft data is freed"); return -EINVAL; } /* The address filter has been removed by hci dev close */ if (!test_bit(HCI_UP, &hdev->flags)) return -ENODEV; /* We are safe to use the address filter from now on. * msft_monitor_device_evt() wouldn't delete this filter because it's * not been added by now. * And all other functions that requiring hci_req_sync_lock wouldn't * touch this filter before this func completes because it's protected * by hci_req_sync_lock. 
*/ if (address_filter->state == AF_STATE_REMOVING) { mutex_lock(&msft->filter_lock); list_del(&address_filter->list); mutex_unlock(&msft->filter_lock); kfree(address_filter); return 0; } size = sizeof(*cp) + sizeof(address_filter->addr_type) + sizeof(address_filter->bdaddr); cp = kzalloc(size, GFP_KERNEL); if (!cp) { bt_dev_err(hdev, "MSFT: Alloc cmd param err"); remove = true; goto done; } cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; cp->rssi_high = address_filter->rssi_high; cp->rssi_low = address_filter->rssi_low; cp->rssi_low_interval = address_filter->rssi_low_interval; cp->rssi_sampling_period = address_filter->rssi_sampling_period; cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR; cp->data[0] = address_filter->addr_type; memcpy(&cp->data[1], &address_filter->bdaddr, sizeof(address_filter->bdaddr)); skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp, HCI_CMD_TIMEOUT); if (IS_ERR_OR_NULL(skb)) { bt_dev_err(hdev, "Failed to enable address %pMR filter", &address_filter->bdaddr); skb = NULL; remove = true; goto done; } rp = skb_pull_data(skb, sizeof(*rp)); if (!rp || rp->sub_opcode != MSFT_OP_LE_MONITOR_ADVERTISEMENT || rp->status) remove = true; done: mutex_lock(&msft->filter_lock); if (remove) { bt_dev_warn(hdev, "MSFT: Remove address (%pMR) filter", &address_filter->bdaddr); list_del(&address_filter->list); kfree(address_filter); } else { address_filter->state = AF_STATE_ADDED; address_filter->msft_handle = rp->handle; bt_dev_dbg(hdev, "MSFT: Address %pMR filter enabled", &address_filter->bdaddr); } mutex_unlock(&msft->filter_lock); kfree_skb(skb); return 0; } /* This function requires the caller holds msft->filter_lock */ static struct msft_monitor_addr_filter_data *msft_add_address_filter (struct hci_dev *hdev, u8 addr_type, bdaddr_t *bdaddr, struct msft_monitor_advertisement_handle_data *handle_data) { struct msft_monitor_addr_filter_data *address_filter = NULL; struct msft_data *msft = hdev->msft_data; int err; address_filter = kzalloc(sizeof(*address_filter), GFP_KERNEL); if (!address_filter) return NULL; address_filter->state = AF_STATE_ADDING; address_filter->msft_handle = 0xff; address_filter->pattern_handle = handle_data->msft_handle; address_filter->mgmt_handle = handle_data->mgmt_handle; address_filter->rssi_high = handle_data->rssi_high; address_filter->rssi_low = handle_data->rssi_low; address_filter->rssi_low_interval = handle_data->rssi_low_interval; address_filter->rssi_sampling_period = handle_data->rssi_sampling_period; address_filter->addr_type = addr_type; bacpy(&address_filter->bdaddr, bdaddr); /* With the above AF_STATE_ADDING, duplicated address filter can be * avoided when receiving monitor device event (found/lost) frequently * for the same device. 
*/ list_add_tail(&address_filter->list, &msft->address_filters); err = hci_cmd_sync_queue(hdev, msft_add_address_filter_sync, address_filter, NULL); if (err < 0) { bt_dev_err(hdev, "MSFT: Add address %pMR filter err", bdaddr); list_del(&address_filter->list); kfree(address_filter); return NULL; } bt_dev_dbg(hdev, "MSFT: Add device %pMR address filter", &address_filter->bdaddr); return address_filter; } /* This function requires the caller holds hdev->lock */ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct msft_monitor_addr_filter_data *n, *address_filter = NULL; struct msft_ev_le_monitor_device *ev; struct msft_monitor_advertisement_handle_data *handle_data; struct msft_data *msft = hdev->msft_data; u16 mgmt_handle = 0xffff; u8 addr_type; ev = msft_skb_pull(hdev, skb, MSFT_EV_LE_MONITOR_DEVICE, sizeof(*ev)); if (!ev) return; bt_dev_dbg(hdev, "MSFT vendor event 0x%02x: handle 0x%04x state %d addr %pMR", MSFT_EV_LE_MONITOR_DEVICE, ev->monitor_handle, ev->monitor_state, &ev->bdaddr); handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false); if (!test_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks)) { if (!handle_data) return; mgmt_handle = handle_data->mgmt_handle; goto report_state; } if (handle_data) { /* Don't report any device found/lost event from pattern * monitors. Pattern monitor always has its address filters for * tracking devices. */ address_filter = msft_find_address_data(hdev, ev->addr_type, &ev->bdaddr, handle_data->msft_handle); if (address_filter) return; if (ev->monitor_state && handle_data->cond_type == MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN) msft_add_address_filter(hdev, ev->addr_type, &ev->bdaddr, handle_data); return; } /* This device event is not from pattern monitor. * Report it if there is a corresponding address_filter for it. */ list_for_each_entry(n, &msft->address_filters, list) { if (n->state == AF_STATE_ADDED && n->msft_handle == ev->monitor_handle) { mgmt_handle = n->mgmt_handle; address_filter = n; break; } } if (!address_filter) { bt_dev_warn(hdev, "MSFT: Unexpected device event %pMR, %u, %u", &ev->bdaddr, ev->monitor_handle, ev->monitor_state); return; } report_state: switch (ev->addr_type) { case ADDR_LE_DEV_PUBLIC: addr_type = BDADDR_LE_PUBLIC; break; case ADDR_LE_DEV_RANDOM: addr_type = BDADDR_LE_RANDOM; break; default: bt_dev_err(hdev, "MSFT vendor event 0x%02x: unknown addr type 0x%02x", MSFT_EV_LE_MONITOR_DEVICE, ev->addr_type); return; } if (ev->monitor_state) { msft_device_found(hdev, &ev->bdaddr, addr_type, mgmt_handle); } else { if (address_filter && address_filter->state == AF_STATE_ADDED) { address_filter->state = AF_STATE_REMOVING; hci_cmd_sync_queue(hdev, msft_cancel_address_filter_sync, address_filter, NULL); } msft_device_lost(hdev, &ev->bdaddr, addr_type, mgmt_handle); } } void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct msft_data *msft = hdev->msft_data; u8 *evt_prefix; u8 *evt; if (!msft) return; /* When the extension has defined an event prefix, check that it * matches, and otherwise just return. */ if (msft->evt_prefix_len > 0) { evt_prefix = msft_skb_pull(hdev, skb, 0, msft->evt_prefix_len); if (!evt_prefix) return; if (memcmp(evt_prefix, msft->evt_prefix, msft->evt_prefix_len)) return; } /* Every event starts at least with an event code and the rest of * the data is variable and depends on the event code. 
*/ if (skb->len < 1) return; evt = msft_skb_pull(hdev, skb, 0, sizeof(*evt)); if (!evt) return; hci_dev_lock(hdev); switch (*evt) { case MSFT_EV_LE_MONITOR_DEVICE: mutex_lock(&msft->filter_lock); msft_monitor_device_evt(hdev, skb); mutex_unlock(&msft->filter_lock); break; default: bt_dev_dbg(hdev, "MSFT vendor event 0x%02x", *evt); break; } hci_dev_unlock(hdev); } __u64 msft_get_features(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; return msft ? msft->features : 0; } static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, void *user_data, u8 status) { struct msft_cp_le_set_advertisement_filter_enable *cp = user_data; struct msft_data *msft = hdev->msft_data; /* Error 0x0C would be returned if the filter enabled status is * already set to whatever we were trying to set. * Although the default state should be disabled, some controller set * the initial value to enabled. Because there is no way to know the * actual initial value before sending this command, here we also treat * error 0x0C as success. */ if (status != 0x00 && status != 0x0C) return; hci_dev_lock(hdev); msft->filter_enabled = cp->enable; if (status == 0x0C) bt_dev_warn(hdev, "MSFT filter_enable is already %s", cp->enable ? "on" : "off"); hci_dev_unlock(hdev); } /* This function requires the caller holds hci_req_sync_lock */ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_data *msft = hdev->msft_data; if (!msft) return -EOPNOTSUPP; if (msft->resuming || msft->suspending) return -EBUSY; return msft_add_monitor_sync(hdev, monitor); } /* This function requires the caller holds hci_req_sync_lock */ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { struct msft_data *msft = hdev->msft_data; if (!msft) return -EOPNOTSUPP; if (msft->resuming || msft->suspending) return -EBUSY; return msft_remove_monitor_sync(hdev, monitor); } int msft_set_filter_enable(struct hci_dev *hdev, bool enable) { struct msft_cp_le_set_advertisement_filter_enable cp; struct msft_data *msft = hdev->msft_data; int err; if (!msft) return -EOPNOTSUPP; cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; cp.enable = enable; err = __hci_cmd_sync_status(hdev, hdev->msft_opcode, sizeof(cp), &cp, HCI_CMD_TIMEOUT); msft_le_set_advertisement_filter_enable_cb(hdev, &cp, err); return 0; } bool msft_curve_validity(struct hci_dev *hdev) { return hdev->msft_curve_validity; }
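/* Usage sketch (illustrative only): the entry points above are driven by the
 * HCI core once a transport driver publishes the vendor opcode. The opcode
 * value below (0xFC1E) and the probe() context are assumptions made for this
 * example; everything else refers to functions defined in this file.
 *
 *	// driver probe path, before hci_register_dev():
 *	hdev->msft_opcode = 0xFC1E;		// vendor-specific HCI opcode
 *
 *	// core side, roughly in call order:
 *	msft_register(hdev);			// allocate struct msft_data
 *	msft_do_open(hdev);			// read features, arm monitors
 *	msft_vendor_evt(hdev, data, skb);	// per vendor event received
 *	msft_do_close(hdev);			// reset state on power off
 *	msft_unregister(hdev);			// free msft_data on removal
 */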
linux-master
net/bluetooth/msft.c
// SPDX-License-Identifier: GPL-2.0 /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2021 Intel Corporation * Copyright 2023 NXP */ #include <linux/property.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "hci_codec.h" #include "hci_debugfs.h" #include "smp.h" #include "eir.h" #include "msft.h" #include "aosp.h" #include "leds.h" static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, struct sk_buff *skb) { bt_dev_dbg(hdev, "result 0x%2.2x", result); if (hdev->req_status != HCI_REQ_PEND) return; hdev->req_result = result; hdev->req_status = HCI_REQ_DONE; if (skb) { struct sock *sk = hci_skb_sk(skb); /* Drop sk reference if set */ if (sk) sock_put(sk); hdev->req_skb = skb_get(skb); } wake_up_interruptible(&hdev->req_wait_q); } static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, struct sock *sk) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) return NULL; hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) skb_put_data(skb, param, plen); bt_dev_dbg(hdev, "skb len %d", skb->len); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_opcode(skb) = opcode; /* Grab a reference if command needs to be associated with a sock (e.g. * likely mgmt socket that initiated the command). */ if (sk) { hci_skb_sk(skb) = sk; sock_hold(sk); } return skb; } static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, const void *param, u8 event, struct sock *sk) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. */ if (req->err) return; skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); req->err = -ENOMEM; return; } if (skb_queue_empty(&req->cmd_q)) bt_cb(skb)->hci.req_flags |= HCI_REQ_START; hci_skb_event(skb) = event; skb_queue_tail(&req->cmd_q, skb); } static int hci_cmd_sync_run(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; unsigned long flags; bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. */ if (req->err) { skb_queue_purge(&req->cmd_q); return req->err; } /* Do not allow empty requests */ if (skb_queue_empty(&req->cmd_q)) return -ENODATA; skb = skb_peek_tail(&req->cmd_q); bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; spin_lock_irqsave(&hdev->cmd_q.lock, flags); skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } /* This function requires the caller holds hdev->req_lock. 
*/ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, struct sock *sk) { struct hci_request req; struct sk_buff *skb; int err = 0; bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); hci_req_init(&req, hdev); hci_cmd_sync_add(&req, opcode, plen, param, event, sk); hdev->req_status = HCI_REQ_PEND; err = hci_cmd_sync_run(&req); if (err < 0) return ERR_PTR(err); err = wait_event_interruptible_timeout(hdev->req_wait_q, hdev->req_status != HCI_REQ_PEND, timeout); if (err == -ERESTARTSYS) return ERR_PTR(-EINTR); switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); break; case HCI_REQ_CANCELED: err = -hdev->req_result; break; default: err = -ETIMEDOUT; break; } hdev->req_status = 0; hdev->req_result = 0; skb = hdev->req_skb; hdev->req_skb = NULL; bt_dev_dbg(hdev, "end: err %d", err); if (err < 0) { kfree_skb(skb); return ERR_PTR(err); } return skb; } EXPORT_SYMBOL(__hci_cmd_sync_sk); /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync); /* Send HCI command and wait for command complete event */ struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { struct sk_buff *skb; if (!test_bit(HCI_UP, &hdev->flags)) return ERR_PTR(-ENETDOWN); bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); hci_req_sync_lock(hdev); skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); hci_req_sync_unlock(hdev); return skb; } EXPORT_SYMBOL(hci_cmd_sync); /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout) { return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync_ev); /* This function requires the caller holds hdev->req_lock. */ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, struct sock *sk) { struct sk_buff *skb; u8 status; skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); if (IS_ERR(skb)) { if (!event) bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, PTR_ERR(skb)); return PTR_ERR(skb); } /* If command return a status event skb will be set to NULL as there are * no parameters, in case of failure IS_ERR(skb) would have be set to * the actual error would be found with PTR_ERR(skb). 
*/ if (!skb) return 0; status = skb->data[0]; kfree_skb(skb); return status; } EXPORT_SYMBOL(__hci_cmd_sync_status_sk); int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout, NULL); } EXPORT_SYMBOL(__hci_cmd_sync_status); static void hci_cmd_sync_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); bt_dev_dbg(hdev, ""); /* Dequeue all entries and run them */ while (1) { struct hci_cmd_sync_work_entry *entry; mutex_lock(&hdev->cmd_sync_work_lock); entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, struct hci_cmd_sync_work_entry, list); if (entry) list_del(&entry->list); mutex_unlock(&hdev->cmd_sync_work_lock); if (!entry) break; bt_dev_dbg(hdev, "entry %p", entry); if (entry->func) { int err; hci_req_sync_lock(hdev); err = entry->func(hdev, entry->data); if (entry->destroy) entry->destroy(hdev, entry->data, err); hci_req_sync_unlock(hdev); } kfree(entry); } } static void hci_cmd_sync_cancel_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work); cancel_delayed_work_sync(&hdev->cmd_timer); cancel_delayed_work_sync(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); wake_up_interruptible(&hdev->req_wait_q); } static int hci_scan_disable_sync(struct hci_dev *hdev); static int scan_disable_sync(struct hci_dev *hdev, void *data) { return hci_scan_disable_sync(hdev); } static int hci_inquiry_sync(struct hci_dev *hdev, u8 length); static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) { return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); } static void le_scan_disable(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_disable.work); int status; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) goto _return; cancel_delayed_work(&hdev->le_scan_restart); status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); if (status) { bt_dev_err(hdev, "failed to disable LE scan: %d", status); goto _return; } hdev->discovery.scan_start = 0; /* If we were running LE only scan, change discovery state. If * we were running both LE and BR/EDR inquiry simultaneously, * and BR/EDR inquiry is already finished, stop discovery, * otherwise BR/EDR inquiry will stop discovery when finished. * If we will resolve remote device name, do not change * discovery state. */ if (hdev->discovery.type == DISCOV_TYPE_LE) goto discov_stopped; if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) goto _return; if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { if (!test_bit(HCI_INQUIRY, &hdev->flags) && hdev->discovery.state != DISCOVERY_RESOLVING) goto discov_stopped; goto _return; } status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); if (status) { bt_dev_err(hdev, "inquiry failed: status %d", status); goto discov_stopped; } goto _return; discov_stopped: hci_discovery_set_state(hdev, DISCOVERY_STOPPED); _return: hci_dev_unlock(hdev); } static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup); static int hci_le_scan_restart_sync(struct hci_dev *hdev) { /* If controller is not scanning we are done. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, LE_SCAN_FILTER_DUP_ENABLE); } static int le_scan_restart_sync(struct hci_dev *hdev, void *data) { return hci_le_scan_restart_sync(hdev); } static void le_scan_restart(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_restart.work); unsigned long timeout, duration, scan_start, now; int status; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL); if (status) { bt_dev_err(hdev, "failed to restart LE scan: status %d", status); goto unlock; } if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || !hdev->discovery.scan_start) goto unlock; /* When the scan was started, hdev->le_scan_disable has been queued * after duration from scan_start. During scan restart this job * has been canceled, and we need to queue it again after proper * timeout, to make sure that scan does not run indefinitely. */ duration = hdev->discovery.scan_duration; scan_start = hdev->discovery.scan_start; now = jiffies; if (now - scan_start <= duration) { int elapsed; if (now >= scan_start) elapsed = now - scan_start; else elapsed = ULONG_MAX - scan_start + now; timeout = duration - elapsed; } else { timeout = 0; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); unlock: hci_dev_unlock(hdev); } static int reenable_adv_sync(struct hci_dev *hdev, void *data) { bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return 0; if (hdev->cur_adv_instance) { return hci_schedule_adv_instance_sync(hdev, hdev->cur_adv_instance, true); } else { if (ext_adv_capable(hdev)) { hci_start_ext_adv_sync(hdev, 0x00); } else { hci_update_adv_data_sync(hdev, 0x00); hci_update_scan_rsp_data_sync(hdev, 0x00); hci_enable_advertising_sync(hdev); } } return 0; } static void reenable_adv(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, reenable_adv_work); int status; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); if (status) bt_dev_err(hdev, "failed to reenable ADV: %d", status); hci_dev_unlock(hdev); } static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { hdev->adv_instance_timeout = 0; cancel_delayed_work(&hdev->adv_instance_expire); } } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force) { struct adv_info *adv_instance, *n, *next_instance = NULL; int err; u8 rem_inst; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance. 
*/ if (instance && hdev->cur_adv_instance == instance) next_instance = hci_get_next_instance(hdev, instance); if (instance == 0x00) { list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!(force || adv_instance->timeout)) continue; rem_inst = adv_instance->instance; err = hci_remove_adv_instance(hdev, rem_inst); if (!err) mgmt_advertising_removed(sk, hdev, rem_inst); } } else { adv_instance = hci_find_adv_instance(hdev, instance); if (force || (adv_instance && adv_instance->timeout && !adv_instance->remaining_time)) { /* Don't advertise a removed instance. */ if (next_instance && next_instance->instance == instance) next_instance = NULL; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } } if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return 0; if (next_instance && !ext_adv_capable(hdev)) return hci_schedule_adv_instance_sync(hdev, next_instance->instance, false); return 0; } static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) { u8 instance = *(u8 *)data; kfree(data); hci_clear_adv_instance_sync(hdev, NULL, instance, false); if (list_empty(&hdev->adv_instances)) return hci_disable_advertising_sync(hdev); return 0; } static void adv_timeout_expire(struct work_struct *work) { u8 *inst_ptr; struct hci_dev *hdev = container_of(work, struct hci_dev, adv_instance_expire.work); bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); hdev->adv_instance_timeout = 0; if (hdev->cur_adv_instance == 0x00) goto unlock; inst_ptr = kmalloc(1, GFP_KERNEL); if (!inst_ptr) goto unlock; *inst_ptr = hdev->cur_adv_instance; hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); unlock: hci_dev_unlock(hdev); } void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); mutex_init(&hdev->unregister_lock); INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); INIT_WORK(&hdev->reenable_adv_work, reenable_adv); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); } void hci_cmd_sync_clear(struct hci_dev *hdev) { struct hci_cmd_sync_work_entry *entry, *tmp; cancel_work_sync(&hdev->cmd_sync_work); cancel_work_sync(&hdev->reenable_adv_work); mutex_lock(&hdev->cmd_sync_work_lock); list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { if (entry->destroy) entry->destroy(hdev, entry->data, -ECANCELED); list_del(&entry->list); kfree(entry); } mutex_unlock(&hdev->cmd_sync_work_lock); } void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; cancel_delayed_work_sync(&hdev->cmd_timer); cancel_delayed_work_sync(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); wake_up_interruptible(&hdev->req_wait_q); } } void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) { bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = err; hdev->req_status = HCI_REQ_CANCELED; queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); } } EXPORT_SYMBOL(hci_cmd_sync_cancel); /* Submit HCI command to be run in as cmd_sync_work: * * - hdev must _not_ be unregistered */ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, 
hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; int err = 0; mutex_lock(&hdev->unregister_lock); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { err = -ENODEV; goto unlock; } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { err = -ENOMEM; goto unlock; } entry->func = func; entry->data = data; entry->destroy = destroy; mutex_lock(&hdev->cmd_sync_work_lock); list_add_tail(&entry->list, &hdev->cmd_sync_work_list); mutex_unlock(&hdev->cmd_sync_work_lock); queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); unlock: mutex_unlock(&hdev->unregister_lock); return err; } EXPORT_SYMBOL(hci_cmd_sync_submit); /* Queue HCI command: * * - hdev must be running */ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy) { /* Only queue command if hdev is running which means it had been opened * and is either on init phase or is already up. */ if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; return hci_cmd_sync_submit(hdev, func, data, destroy); } EXPORT_SYMBOL(hci_cmd_sync_queue); int hci_update_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return 0; if (!lmp_ext_inq_capable(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return 0; memset(&cp, 0, sizeof(cp)); eir_create(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return 0; memcpy(hdev->eir, cp.data, sizeof(cp.data)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; u8 val = 0; list_for_each_entry(uuid, &hdev->uuids, list) val |= uuid->svc_hint; return val; } int hci_update_class_sync(struct hci_dev *hdev) { u8 cod[3]; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return 0; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) cod[1] |= 0x20; if (memcmp(cod, hdev->dev_class, 3) == 0) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod, HCI_CMD_TIMEOUT); } static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) { /* If there is no connection we are OK to advertise. */ if (hci_conn_num(hdev, LE_LINK) == 0) return true; /* Check le_states if there is any connection in peripheral role. */ if (hdev->conn_hash.le_num_peripheral > 0) { /* Peripheral connection state and non connectable mode * bit 20. */ if (!connectable && !(hdev->le_states[2] & 0x10)) return false; /* Peripheral connection state and connectable mode bit 38 * and scannable bit 21. */ if (connectable && (!(hdev->le_states[4] & 0x40) || !(hdev->le_states[2] & 0x20))) return false; } /* Check le_states if there is any connection in central role. */ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { /* Central connection state and non connectable mode bit 18. */ if (!connectable && !(hdev->le_states[2] & 0x02)) return false; /* Central connection state and connectable mode bit 35 and * scannable 19. 
*/ if (connectable && (!(hdev->le_states[4] & 0x08) || !(hdev->le_states[2] & 0x08))) return false; } return true; } static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) { /* If privacy is not enabled don't use RPA */ if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return false; /* If basic privacy mode is enabled use RPA */ if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) return true; /* If limited privacy mode is enabled don't use RPA if we're * both discoverable and bondable. */ if ((flags & MGMT_ADV_FLAG_DISCOV) && hci_dev_test_flag(hdev, HCI_BONDABLE)) return false; /* We're neither bondable nor discoverable in the limited * privacy mode, therefore use RPA. */ return true; } static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) { /* If we're advertising or initiating an LE connection we can't * go ahead and change the random address at this time. This is * because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). * * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev)) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return 0; } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa, HCI_CMD_TIMEOUT); } int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, bool rpa, u8 *own_addr_type) { int err; /* If privacy is enabled use a resolvable private address. If * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ if (rpa) { /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; /* Check if RPA is valid */ if (rpa_valid(hdev)) return 0; err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } err = hci_set_random_addr_sync(hdev, &hdev->rpa); if (err) return err; return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for active * scanning and non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; return hci_set_random_addr_sync(hdev, &nrpa); } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. 
*/ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) return hci_set_random_addr_sync(hdev, &hdev->static_addr); return 0; } /* Neither privacy nor static address is being used so use a * public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; u8 size; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; /* If not enabled there is nothing to do */ if (!adv->enabled) return 0; } memset(data, 0, sizeof(data)); cp = (void *)data; set = (void *)cp->data; /* Instance 0x00 indicates all advertising instances will be disabled */ cp->num_of_sets = !!instance; cp->enable = 0x00; set->handle = instance; size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, size, data, HCI_CMD_TIMEOUT); } static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, bdaddr_t *random_addr) { struct hci_cp_le_set_adv_set_rand_addr cp; int err; if (!instance) { /* Instance 0x00 doesn't have an adv_info, instead it uses * hdev->random_addr to track its address so whenever it needs * to be updated this also set the random address since * hdev->random_addr is shared with scan state machine. */ err = hci_set_random_addr_sync(hdev, random_addr); if (err) return err; } memset(&cp, 0, sizeof(cp)); cp.handle = instance; bacpy(&cp.bdaddr, random_addr); return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; bool connectable; u32 flags; bdaddr_t random_addr; u8 own_addr_type; int err; struct adv_info *adv; bool secondary_adv; if (instance > 0) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; } else { adv = NULL; } /* Updating parameters of an active instance will return a * Command Disallowed error, so we must first disable the * instance if it is active. */ if (adv && !adv->pending) { err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; } flags = hci_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EPERM; /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ err = hci_get_random_address(hdev, !connectable, adv_use_rpa(hdev, flags), adv, &own_addr_type, &random_addr); if (err < 0) return err; memset(&cp, 0, sizeof(cp)); if (adv) { hci_cpu_to_le24(adv->min_interval, cp.min_interval); hci_cpu_to_le24(adv->max_interval, cp.max_interval); cp.tx_power = adv->tx_power; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); if (connectable) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); } else if (hci_adv_instance_is_scannable(hdev, instance) || (flags & MGMT_ADV_PARAM_SCAN_RSP)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); } else { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); } /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter * contains the peer’s Identity Address and the Peer_Address_Type * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). * These parameters are used to locate the corresponding local IRK in * the resolving list; this IRK is used to generate their own address * used in the advertisement. */ if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) hci_copy_identity_address(hdev, &cp.peer_addr, &cp.peer_addr_type); cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.handle = instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_2M; } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { cp.primary_phy = HCI_ADV_PHY_CODED; cp.secondary_phy = HCI_ADV_PHY_CODED; } else { /* In all other cases use 1M */ cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; } err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; if ((own_addr_type == ADDR_LE_DEV_RANDOM || own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && bacmp(&random_addr, BDADDR_ANY)) { /* Check if random address need to be updated */ if (adv) { if (!bacmp(&random_addr, &adv->random_addr)) return 0; } else { if (!bacmp(&random_addr, &hdev->random_addr)) return 0; } return hci_set_adv_set_random_addr_sync(hdev, instance, &random_addr); } return 0; } static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { struct { struct hci_cp_le_set_ext_scan_rsp_data cp; u8 data[HCI_MAX_EXT_AD_LENGTH]; } pdu; u8 len; struct adv_info *adv = NULL; int err; memset(&pdu, 0, sizeof(pdu)); if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->scan_rsp_changed) return 0; } len = eir_create_scan_rsp(hdev, instance, pdu.data); pdu.cp.handle = instance; pdu.cp.length = len; pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(pdu.cp) + len, &pdu.cp, HCI_CMD_TIMEOUT); if (err) return err; if (adv) { adv->scan_rsp_changed = false; } else { memcpy(hdev->scan_rsp_data, pdu.data, len); hdev->scan_rsp_data_len = len; } return 0; } static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_scan_rsp_data cp; u8 len; memset(&cp, 0, sizeof(cp)); len = eir_create_scan_rsp(hdev, instance, cp.data); if 
(hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) return 0; memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); hdev->scan_rsp_data_len = len; cp.length = len; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; if (ext_adv_capable(hdev)) return hci_set_ext_scan_rsp_data_sync(hdev, instance); return __hci_set_scan_rsp_data_sync(hdev, instance); } int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *set; u8 data[sizeof(*cp) + sizeof(*set) * 1]; struct adv_info *adv; if (instance > 0) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; /* If already enabled there is nothing to do */ if (adv->enabled) return 0; } else { adv = NULL; } cp = (void *)data; set = (void *)cp->data; memset(cp, 0, sizeof(*cp)); cp->enable = 0x01; cp->num_of_sets = 0x01; memset(set, 0, sizeof(*set)); set->handle = instance; /* Set duration per instance since controller is responsible for * scheduling it. */ if (adv && adv->timeout) { u16 duration = adv->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ set->duration = cpu_to_le16(duration / 10); } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(*cp) + sizeof(*set) * cp->num_of_sets, data, HCI_CMD_TIMEOUT); } int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) { int err; err = hci_setup_ext_adv_instance_sync(hdev, instance); if (err) return err; err = hci_set_ext_scan_rsp_data_sync(hdev, instance); if (err) return err; return hci_enable_ext_advertising_sync(hdev, instance); } static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_per_adv_enable cp; struct adv_info *adv = NULL; /* If periodic advertising already disabled there is nothing to do. 
*/ adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->periodic || !adv->enabled) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x00; cp.handle = instance; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, u16 min_interval, u16 max_interval) { struct hci_cp_le_set_per_adv_params cp; memset(&cp, 0, sizeof(cp)); if (!min_interval) min_interval = DISCOV_LE_PER_ADV_INT_MIN; if (!max_interval) max_interval = DISCOV_LE_PER_ADV_INT_MAX; cp.handle = instance; cp.min_interval = cpu_to_le16(min_interval); cp.max_interval = cpu_to_le16(max_interval); cp.periodic_properties = 0x0000; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) { struct { struct hci_cp_le_set_per_adv_data cp; u8 data[HCI_MAX_PER_AD_LENGTH]; } pdu; u8 len; memset(&pdu, 0, sizeof(pdu)); if (instance) { struct adv_info *adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->periodic) return 0; } len = eir_create_per_adv_data(hdev, instance, pdu.data); pdu.cp.length = len; pdu.cp.handle = instance; pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, sizeof(pdu.cp) + len, &pdu, HCI_CMD_TIMEOUT); } static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_per_adv_enable cp; struct adv_info *adv = NULL; /* If periodic advertising already enabled there is nothing to do. */ adv = hci_find_adv_instance(hdev, instance); if (adv && adv->periodic && adv->enabled) return 0; memset(&cp, 0, sizeof(cp)); cp.enable = 0x01; cp.handle = instance; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Checks if periodic advertising data contains a Basic Announcement and if it * does generates a Broadcast ID and add Broadcast Announcement. */ static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) { u8 bid[3]; u8 ad[4 + 3]; /* Skip if NULL adv as instance 0x00 is used for general purpose * advertising so it cannot used for the likes of Broadcast Announcement * as it can be overwritten at any point. */ if (!adv) return 0; /* Check if PA data doesn't contains a Basic Audio Announcement then * there is nothing to do. */ if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, 0x1851, NULL)) return 0; /* Check if advertising data already has a Broadcast Announcement since * the process may want to control the Broadcast ID directly and in that * case the kernel shall no interfere. 
*/ if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, NULL)) return 0; /* Generate Broadcast ID */ get_random_bytes(bid, sizeof(bid)); eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); return hci_update_adv_data_sync(hdev, adv->instance); } int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, u8 *data, u32 flags, u16 min_interval, u16 max_interval, u16 sync_interval) { struct adv_info *adv = NULL; int err; bool added = false; hci_disable_per_advertising_sync(hdev, instance); if (instance) { adv = hci_find_adv_instance(hdev, instance); /* Create an instance if that could not be found */ if (!adv) { adv = hci_add_per_instance(hdev, instance, flags, data_len, data, sync_interval, sync_interval); if (IS_ERR(adv)) return PTR_ERR(adv); adv->pending = false; added = true; } } /* Start advertising */ err = hci_start_ext_adv_sync(hdev, instance); if (err < 0) goto fail; err = hci_adv_bcast_annoucement(hdev, adv); if (err < 0) goto fail; err = hci_set_per_adv_params_sync(hdev, instance, min_interval, max_interval); if (err < 0) goto fail; err = hci_set_per_adv_data_sync(hdev, instance); if (err < 0) goto fail; err = hci_enable_per_advertising_sync(hdev, instance); if (err < 0) goto fail; return 0; fail: if (added) hci_remove_adv_instance(hdev, instance); return err; } static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) { int err; if (ext_adv_capable(hdev)) return hci_start_ext_adv_sync(hdev, instance); err = hci_update_adv_data_sync(hdev, instance); if (err) return err; err = hci_update_scan_rsp_data_sync(hdev, instance); if (err) return err; return hci_enable_advertising_sync(hdev); } int hci_enable_advertising_sync(struct hci_dev *hdev) { struct adv_info *adv_instance; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; u16 adv_min_interval, adv_max_interval; u32 flags; u8 status; if (ext_adv_capable(hdev)) return hci_enable_ext_advertising_sync(hdev, hdev->cur_adv_instance); flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EINVAL; status = hci_disable_advertising_sync(hdev); if (status) return status; /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ status = hci_update_random_address_sync(hdev, !connectable, adv_use_rpa(hdev, flags), &own_addr_type); if (status) return status; memset(&cp, 0, sizeof(cp)); if (adv_instance) { adv_min_interval = adv_instance->min_interval; adv_max_interval = adv_instance->max_interval; } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; } if (connectable) { cp.type = LE_ADV_IND; } else { if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) cp.type = LE_ADV_SCAN_IND; else cp.type = LE_ADV_NONCONN_IND; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; } } cp.min_interval = cpu_to_le16(adv_min_interval); cp.max_interval = cpu_to_le16(adv_max_interval); cp.own_address_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (status) return status; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static int enable_advertising_sync(struct hci_dev *hdev, void *data) { return hci_enable_advertising_sync(hdev); } int hci_enable_advertising(struct hci_dev *hdev) { if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return 0; return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); } int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, struct sock *sk) { int err; if (!ext_adv_capable(hdev)) return 0; err = hci_disable_ext_adv_instance_sync(hdev, instance); if (err) return err; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 && !hci_find_adv_instance(hdev, instance)) return -EINVAL; return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance, 0, HCI_CMD_TIMEOUT, sk); } static int remove_ext_adv_sync(struct hci_dev *hdev, void *data) { struct adv_info *adv = data; u8 instance = 0; if (adv) instance = adv->instance; return hci_remove_ext_adv_instance_sync(hdev, instance, NULL); } int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv = NULL; if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return -EINVAL; } return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL); } int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) { struct hci_cp_le_term_big cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) { struct { struct hci_cp_le_set_ext_adv_data cp; u8 data[HCI_MAX_EXT_AD_LENGTH]; } pdu; u8 len; struct adv_info *adv = NULL; int err; memset(&pdu, 0, sizeof(pdu)); if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv || !adv->adv_data_changed) return 0; } len = eir_create_adv_data(hdev, instance, pdu.data); pdu.cp.length = len; pdu.cp.handle = instance; pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(pdu.cp) + len, &pdu.cp, HCI_CMD_TIMEOUT); if (err) return err; /* Update data if the command succeed */ if (adv) { adv->adv_data_changed = false; } else { memcpy(hdev->adv_data, pdu.data, len); hdev->adv_data_len = len; } return 0; } static int 
hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) { struct hci_cp_le_set_adv_data cp; u8 len; memset(&cp, 0, sizeof(cp)); len = eir_create_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return 0; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) { if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; if (ext_adv_capable(hdev)) return hci_set_ext_adv_data_sync(hdev, instance); return hci_set_adv_data_sync(hdev, instance); } int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, bool force) { struct adv_info *adv = NULL; u16 timeout; if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) return -EPERM; if (hdev->adv_instance_timeout) return -EBUSY; adv = hci_find_adv_instance(hdev, instance); if (!adv) return -ENOENT; /* A zero timeout means unlimited advertising. As long as there is * only one instance, duration should be ignored. We still set a timeout * in case further instances are being added later on. * * If the remaining lifetime of the instance is more than the duration * then the timeout corresponds to the duration, otherwise it will be * reduced to the remaining instance lifetime. */ if (adv->timeout == 0 || adv->duration <= adv->remaining_time) timeout = adv->duration; else timeout = adv->remaining_time; /* The remaining time is being reduced unless the instance is being * advertised without time limit. */ if (adv->timeout) adv->remaining_time = adv->remaining_time - timeout; /* Only use work for scheduling instances with legacy advertising */ if (!ext_adv_capable(hdev)) { hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, msecs_to_jiffies(timeout * 1000)); } /* If we're just re-scheduling the same instance again then do not * execute any HCI commands. This happens when a single instance is * being advertised. */ if (!force && hdev->cur_adv_instance == instance && hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; hdev->cur_adv_instance = instance; return hci_start_adv_sync(hdev, instance); } static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) { int err; if (!ext_adv_capable(hdev)) return 0; /* Disable instance 0x00 to disable all instances */ err = hci_disable_ext_adv_instance_sync(hdev, 0x00); if (err) return err; return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) { struct adv_info *adv, *n; int err = 0; if (ext_adv_capable(hdev)) /* Remove all existing sets */ err = hci_clear_adv_sets_sync(hdev, sk); if (ext_adv_capable(hdev)) return err; /* This is safe as long as there is no command send while the lock is * held. */ hci_dev_lock(hdev); /* Cleanup non-ext instances */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { u8 instance = adv->instance; int err; if (!(force || adv->timeout)) continue; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } hci_dev_unlock(hdev); return 0; } static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, struct sock *sk) { int err = 0; /* If we use extended advertising, instance has to be removed first. 
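 *
 * In short: on an extended advertising controller the set is removed in
 * the controller itself via hci_remove_ext_adv_instance_sync(), which ends
 * up issuing HCI_OP_LE_REMOVE_ADV_SET, and this function then returns;
 * otherwise only the locally stored instance is dropped with
 * hci_remove_adv_instance() under the hdev lock and userspace is notified
 * through mgmt_advertising_removed().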
*/ if (ext_adv_capable(hdev)) err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); if (ext_adv_capable(hdev)) return err; /* This is safe as long as there is no command send while the lock is * held. */ hci_dev_lock(hdev); err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); hci_dev_unlock(hdev); return err; } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force) { struct adv_info *next = NULL; int err; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance. */ if (hdev->cur_adv_instance == instance) next = hci_get_next_instance(hdev, instance); if (!instance) { err = hci_clear_adv_sync(hdev, sk, force); if (err) return err; } else { struct adv_info *adv = hci_find_adv_instance(hdev, instance); if (force || (adv && adv->timeout && !adv->remaining_time)) { /* Don't advertise a removed instance. */ if (next && next->instance == instance) next = NULL; err = hci_remove_adv_sync(hdev, instance, sk); if (err) return err; } } if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return 0; if (next && !ext_adv_capable(hdev)) hci_schedule_adv_instance_sync(hdev, next->instance, false); return 0; } int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) { struct hci_cp_read_rssi cp; cp.handle = handle; return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, sizeof(*cp), cp, HCI_CMD_TIMEOUT); } int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) { struct hci_cp_read_tx_power cp; cp.handle = handle; cp.type = type; return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_disable_advertising_sync(struct hci_dev *hdev) { u8 enable = 0x00; int err = 0; /* If controller is not advertising we are done. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; if (ext_adv_capable(hdev)) err = hci_disable_ext_adv_instance_sync(hdev, 0x00); if (ext_adv_capable(hdev)) return err; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup) { struct hci_cp_le_set_ext_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = val; if (hci_dev_test_flag(hdev, HCI_MESH)) cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; else cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, u8 filter_dup) { struct hci_cp_le_set_scan_enable cp; if (use_ext_scan(hdev)) return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); memset(&cp, 0, sizeof(cp)); cp.enable = val; if (val && hci_dev_test_flag(hdev, HCI_MESH)) cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; else cp.filter_dup = filter_dup; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) { if (!use_ll_privacy(hdev)) return 0; /* If controller is not/already resolving we are done. */ if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, sizeof(val), &val, HCI_CMD_TIMEOUT); } static int hci_scan_disable_sync(struct hci_dev *hdev) { int err; /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); if (err) { bt_dev_err(hdev, "Unable to disable scanning: %d", err); return err; } return err; } static bool scan_use_rpa(struct hci_dev *hdev) { return hci_dev_test_flag(hdev, HCI_PRIVACY); } static void hci_start_interleave_scan(struct hci_dev *hdev) { hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, 0); } static bool is_interleave_scanning(struct hci_dev *hdev) { return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; } static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); cancel_delayed_work_sync(&hdev->interleave_scan); hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; } /* Return true if interleave_scan wasn't started until exiting this function, * otherwise, return false */ static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) { /* Do interleaved scan only if all of the following are true: * - There is at least one ADV monitor * - At least one pending LE connection or one device to be scanned for * - Monitor offloading is not supported * If so, we should alternate between allowlist scan and one without * any filters to save power. 
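 *
 * Restated as the expression computed just below: use_interleaving is
 * true only when hci_is_adv_monitoring() reports at least one monitor,
 * at least one of pend_le_conns / pend_le_reports is non-empty, and
 * hci_get_adv_monitor_offload_ext() returns HCI_ADV_MONITOR_EXT_NONE.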
*/ bool use_interleaving = hci_is_adv_monitoring(hdev) && !(list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE; bool is_interleaving = is_interleave_scanning(hdev); if (use_interleaving && !is_interleaving) { hci_start_interleave_scan(hdev); bt_dev_dbg(hdev, "starting interleave scan"); return true; } if (!use_interleaving && is_interleaving) cancel_interleave_scan(hdev); return false; } /* Removes connection to resolve list if needed.*/ static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_resolv_list cp; struct bdaddr_list_with_irk *entry; if (!use_ll_privacy(hdev)) return 0; /* Check if the IRK has been programmed */ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, bdaddr_type); if (!entry) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_del_accept_list_sync(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_accept_list cp; int err; /* Check if device is on accept list before removing it */ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type)) return 0; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); /* Ignore errors when removing from resolving list as that is likely * that the device was never added. */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to remove from allow list: %d", err); return err; } bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } struct conn_params { bdaddr_t addr; u8 addr_type; hci_conn_flags_t flags; u8 privacy_mode; }; /* Adds connection to resolve list if needed. * Setting params to NULL programs local hdev->irk */ static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_add_to_resolv_list cp; struct smp_irk *irk; struct bdaddr_list_with_irk *entry; struct hci_conn_params *p; if (!use_ll_privacy(hdev)) return 0; /* Attempt to program local identity address, type and irk if params is * NULL. */ if (!params) { if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return 0; hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); memcpy(cp.peer_irk, hdev->irk, 16); goto done; } irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); if (!irk) return 0; /* Check if the IK has _not_ been programmed yet. */ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, &params->addr, params->addr_type); if (entry) return 0; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); memcpy(cp.peer_irk, irk->val, 16); /* Default privacy mode is always Network */ params->privacy_mode = HCI_NETWORK_PRIVACY; rcu_read_lock(); p = hci_pend_le_action_lookup(&hdev->pend_le_conns, &params->addr, params->addr_type); if (!p) p = hci_pend_le_action_lookup(&hdev->pend_le_reports, &params->addr, params->addr_type); if (p) WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); rcu_read_unlock(); done: if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); else memset(cp.local_irk, 0, 16); return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Set Device Privacy Mode. 
*/ static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, struct conn_params *params) { struct hci_cp_le_set_privacy_mode cp; struct smp_irk *irk; /* If device privacy mode has already been set there is nothing to do */ if (params->privacy_mode == HCI_DEVICE_PRIVACY) return 0; /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also * indicates that LL Privacy has been enabled and * HCI_OP_LE_SET_PRIVACY_MODE is supported. */ if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) return 0; irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); if (!irk) return 0; memset(&cp, 0, sizeof(cp)); cp.bdaddr_type = irk->addr_type; bacpy(&cp.bdaddr, &irk->bdaddr); cp.mode = HCI_DEVICE_PRIVACY; /* Note: params->privacy_mode is not updated since it is a copy */ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Adds connection to allow list if needed, if the device uses RPA (has IRK) * this attempts to program the device in the resolving list as well and * properly set the privacy mode. */ static int hci_le_add_accept_list_sync(struct hci_dev *hdev, struct conn_params *params, u8 *num_entries) { struct hci_cp_le_add_to_accept_list cp; int err; /* During suspend, only wakeable devices can be in acceptlist */ if (hdev->suspended && !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) return 0; /* Select filter policy to accept all advertising */ if (*num_entries >= hdev->le_accept_list_size) return -ENOSPC; /* Accept list can not be used with RPAs */ if (!use_ll_privacy(hdev) && hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) return -EINVAL; /* Attempt to program the device in the resolving list first to avoid * having to rollback in case it fails since the resolving list is * dynamic it can probably be smaller than the accept list. */ err = hci_le_add_resolve_list_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to add to resolve list: %d", err); return err; } /* Set Privacy Mode */ err = hci_le_set_privacy_mode_sync(hdev, params); if (err) { bt_dev_err(hdev, "Unable to set privacy mode: %d", err); return err; } /* Check if already in accept list */ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, params->addr_type)) return 0; *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) { bt_dev_err(hdev, "Unable to add to allow list: %d", err); /* Rollback the device from the resolving list */ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); return err; } bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, cp.bdaddr_type); return 0; } /* This function disables/pause all advertising instances */ static int hci_pause_advertising_sync(struct hci_dev *hdev) { int err; int old_state; /* If already been paused there is nothing to do. */ if (hdev->advertising_paused) return 0; bt_dev_dbg(hdev, "Pausing directed advertising"); /* Stop directed advertising */ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); if (old_state) { /* When discoverable timeout triggers, then just make sure * the limited discoverable flag is cleared. Even in the case * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. 
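 *
 * The advertising state captured in old_state above is saved in
 * hdev->advertising_old_state below; hci_resume_advertising_sync() later
 * uses it to decide whether the HCI_ADVERTISING flag needs to be set
 * again when advertising is resumed.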
*/ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; } bt_dev_dbg(hdev, "Pausing advertising instances"); /* Call to disable any advertisements active on the controller. * This will succeed even if no advertisements are configured. */ err = hci_disable_advertising_sync(hdev); if (err) return err; /* If we are using software rotation, pause the loop */ if (!ext_adv_capable(hdev)) cancel_adv_timeout(hdev); hdev->advertising_paused = true; hdev->advertising_old_state = old_state; return 0; } /* This function enables all user advertising instances */ static int hci_resume_advertising_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err; /* If advertising has not been paused there is nothing to do. */ if (!hdev->advertising_paused) return 0; /* Resume directed advertising */ hdev->advertising_paused = false; if (hdev->advertising_old_state) { hci_dev_set_flag(hdev, HCI_ADVERTISING); hdev->advertising_old_state = 0; } bt_dev_dbg(hdev, "Resuming advertising instances"); if (ext_adv_capable(hdev)) { /* Call for each tracked instance to be re-enabled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { err = hci_enable_ext_advertising_sync(hdev, adv->instance); if (!err) continue; /* If the instance cannot be resumed remove it */ hci_remove_ext_adv_instance_sync(hdev, adv->instance, NULL); } } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop */ err = hci_schedule_adv_instance_sync(hdev, hdev->cur_adv_instance, true); } hdev->advertising_paused = false; return err; } static int hci_pause_addr_resolution(struct hci_dev *hdev) { int err; if (!use_ll_privacy(hdev)) return 0; if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return 0; /* Cannot disable addr resolution if scanning is enabled or * when initiating an LE connection. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || hci_lookup_le_connect(hdev)) { bt_dev_err(hdev, "Command not allowed when scan/LE connect"); return -EPERM; } /* Cannot disable addr resolution if advertising is enabled. */ err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "Pause advertising failed: %d", err); return err; } err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) bt_dev_err(hdev, "Unable to disable Address Resolution: %d", err); /* Return if address resolution is disabled and RPA is not used. */ if (!err && scan_use_rpa(hdev)) return 0; hci_resume_advertising_sync(hdev); return err; } struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool extended, struct sock *sk) { u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : HCI_OP_READ_LOCAL_OOB_DATA; return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); } static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) { struct hci_conn_params *params; struct conn_params *p; size_t i; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) ++i; *n = i; rcu_read_unlock(); p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); if (!p) return NULL; rcu_read_lock(); i = 0; list_for_each_entry_rcu(params, list, action) { /* Racing adds are handled in next scan update */ if (i >= *n) break; /* No hdev->lock, but: addr, addr_type are immutable. * privacy_mode is only written by us or in * hci_cc_le_set_privacy_mode that we wait for. * We should be idempotent so MGMT updating flags * while we are processing is OK. 
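 *
 * The array returned by this function is a plain snapshot:
 * hci_update_accept_list_sync() below walks it without any further
 * locking and releases each copy with kvfree() once its entries have
 * been programmed into the accept list.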
*/ bacpy(&p[i].addr, &params->addr); p[i].addr_type = params->addr_type; p[i].flags = READ_ONCE(params->flags); p[i].privacy_mode = READ_ONCE(params->privacy_mode); ++i; } rcu_read_unlock(); *n = i; return p; } /* Device must not be scanning when updating the accept list. * * Update is done using the following sequence: * * use_ll_privacy((Disable Advertising) -> Disable Resolving List) -> * Remove Devices From Accept List -> * (has IRK && use_ll_privacy(Remove Devices From Resolving List))-> * Add Devices to Accept List -> * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) -> * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * In case of failure advertising shall be restored to its original state and * return would disable accept list since either accept or resolving list could * not be programmed. * */ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) { struct conn_params *params; struct bdaddr_list *b, *t; u8 num_entries = 0; bool pend_conn, pend_report; u8 filter_policy; size_t i, n; int err; /* Pause advertising if resolving list can be used as controllers * cannot accept resolving list modifications while advertising. */ if (use_ll_privacy(hdev)) { err = hci_pause_advertising_sync(hdev); if (err) { bt_dev_err(hdev, "pause advertising failed: %d", err); return 0x00; } } /* Disable address resolution while reprogramming accept list since * devices that do have an IRK will be programmed in the resolving list * when LL Privacy is enabled. */ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); if (err) { bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); goto done; } /* Go through the current accept list programmed into the * controller one by one and check if that address is connected or is * still in the list of pending connections or list of devices to * report. If not present in either list, then remove it from * the controller. */ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) continue; /* Pointers not dereferenced, no locks needed */ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, &b->bdaddr, b->bdaddr_type); /* If the device is not likely to connect or report, * remove it from the acceptlist. */ if (!pend_conn && !pend_report) { hci_le_del_accept_list_sync(hdev, &b->bdaddr, b->bdaddr_type); continue; } num_entries++; } /* Since all no longer valid accept list entries have been * removed, walk through the list of pending connections * and ensure that any new device gets programmed into * the controller. * * If the list of the devices is larger than the list of * available accept list entries in the controller, then * just abort and return filer policy value to not use the * accept list. * * The list and params may be mutated while we wait for events, * so make a copy and iterate it. */ params = conn_params_copy(&hdev->pend_le_conns, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, &params[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* After adding all new pending connections, walk through * the list of pending reports and also add these to the * accept list if there is still space. Abort if space runs out. 
*/ params = conn_params_copy(&hdev->pend_le_reports, &n); if (!params) { err = -ENOMEM; goto done; } for (i = 0; i < n; ++i) { err = hci_le_add_accept_list_sync(hdev, &params[i], &num_entries); if (err) { kvfree(params); goto done; } } kvfree(params); /* Use the allowlist unless the following conditions are all true: * - We are not currently suspending * - There are 1 or more ADV monitors registered and it's not offloaded * - Interleaved scanning is not currently using the allowlist */ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) err = -EINVAL; done: filter_policy = err ? 0x00 : 0x01; /* Enable address resolution when LL Privacy is enabled. */ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); if (err) bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); /* Resume advertising if it was paused */ if (use_ll_privacy(hdev)) hci_resume_advertising_sync(hdev); /* Select filter policy to use accept list */ return filter_policy; } static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_ext_scan_params *cp; struct hci_cp_le_scan_phy_params *phy; u8 data[sizeof(*cp) + sizeof(*phy) * 2]; u8 num_phy = 0; cp = (void *)data; phy = (void *)cp->data; memset(data, 0, sizeof(data)); cp->own_addr_type = own_addr_type; cp->filter_policy = filter_policy; if (scan_1m(hdev) || scan_2m(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_1M; phy->type = type; phy->interval = cpu_to_le16(interval); phy->window = cpu_to_le16(window); num_phy++; phy++; } if (scan_coded(hdev)) { cp->scanning_phys |= LE_SCAN_PHY_CODED; phy->type = type; phy->interval = cpu_to_le16(interval); phy->window = cpu_to_le16(window); num_phy++; phy++; } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, sizeof(*cp) + sizeof(*phy) * num_phy, data, HCI_CMD_TIMEOUT); } static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy) { struct hci_cp_le_set_scan_param cp; if (use_ext_scan(hdev)) return hci_le_set_ext_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); memset(&cp, 0, sizeof(cp)); cp.type = type; cp.interval = cpu_to_le16(interval); cp.window = cpu_to_le16(window); cp.own_address_type = own_addr_type; cp.filter_policy = filter_policy; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy, u8 filter_dup) { int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_le_set_scan_param_sync(hdev, type, interval, window, own_addr_type, filter_policy); if (err) return err; return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); } static int hci_passive_scan_sync(struct hci_dev *hdev) { u8 own_addr_type; u8 filter_policy; u16 window, interval; u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; int err; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } err = hci_scan_disable_sync(hdev); if (err) { bt_dev_err(hdev, "disable scanning failed: %d", err); return err; } /* Set require_privacy to false since no SCAN_REQ are send * during passive scanning. 
Not using an non-resolvable address * here is important so that peer devices using direct * advertising with our address will be correctly reported * by the controller. */ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), &own_addr_type)) return 0; if (hdev->enable_advmon_interleave_scan && hci_update_interleaved_scan_sync(hdev)) return 0; bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the accept list must * happen before enabling scanning. The controller does * not allow accept list modification while scanning. */ filter_policy = hci_update_accept_list_sync(hdev); /* When the controller is using random resolvable addresses and * with that having LE privacy enabled, then controllers with * Extended Scanner Filter Policies support can now enable support * for handling directed advertising. * * So instead of using filter polices 0x00 (no acceptlist) * and 0x01 (acceptlist enabled) use the new filter policies * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). */ if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } /* Disable all filtering for Mesh */ if (hci_dev_test_flag(hdev, HCI_MESH)) { filter_policy = 0; filter_dups = LE_SCAN_FILTER_DUP_DISABLE; } bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, own_addr_type, filter_policy, filter_dups); } /* This function controls the passive scanning based on hdev->pend_le_conns * list. If there are pending LE connection we start the background scanning, * otherwise we stop it in the following sequence: * * If there are devices to scan: * * Disable Scanning -> Update Accept List -> * use_ll_privacy((Disable Advertising) -> Disable Resolving List -> * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> * Enable Scanning * * Otherwise: * * Disable Scanning */ int hci_update_passive_scan_sync(struct hci_dev *hdev) { int err; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* No point in doing scanning if LE support hasn't been enabled */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If discovery is active don't interfere with it */ if (hdev->discovery.state != DISCOVERY_STOPPED) return 0; /* Reset RSSI and UUID filters when starting background scanning * since these filters are meant for service discovery only. * * The Start Discovery and Start Service Discovery operations * ensure to set proper values for RSSI threshold and UUID * filter list. So it is safe to just reset them here. */ hci_discovery_filter_clear(hdev); bt_dev_dbg(hdev, "ADV monitoring is %s", hci_is_adv_monitoring(hdev) ? 
"on" : "off"); if (!hci_dev_test_flag(hdev, HCI_MESH) && list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && !hci_is_adv_monitoring(hdev) && !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { /* If there is no pending LE connections or devices * to be scanned for or no ADV monitors, we should stop the * background scanning. */ bt_dev_dbg(hdev, "stopping background scanning"); err = hci_scan_disable_sync(hdev); if (err) bt_dev_err(hdev, "stop background scanning failed: %d", err); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. */ /* If controller is connecting, we should not start scanning * since some controllers are not able to scan and connect at * the same time. */ if (hci_lookup_le_connect(hdev)) return 0; bt_dev_dbg(hdev, "start background scanning"); err = hci_passive_scan_sync(hdev); if (err) bt_dev_err(hdev, "start background scanning failed: %d", err); } return err; } static int update_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_scan_sync(hdev); } int hci_update_scan(struct hci_dev *hdev) { return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); } static int update_passive_scan_sync(struct hci_dev *hdev, void *data) { return hci_update_passive_scan_sync(hdev); } int hci_update_passive_scan(struct hci_dev *hdev) { /* Only queue if it would have any effect */ if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); } int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) { int err; if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) return 0; err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, sizeof(val), &val, HCI_CMD_TIMEOUT); if (!err) { if (val) { hdev->features[1][0] |= LMP_HOST_SC; hci_dev_set_flag(hdev, HCI_SC_ENABLED); } else { hdev->features[1][0] &= ~LMP_HOST_SC; hci_dev_clear_flag(hdev, HCI_SC_ENABLED); } } return err; } int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) { int err; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || lmp_host_ssp_capable(hdev)) return 0; if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); if (err) return err; return hci_write_sc_support_sync(hdev, 0x01); } int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) { struct hci_cp_write_le_host_supported cp; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || !lmp_bredr_capable(hdev)) return 0; /* Check first if we already have the right host state * (host features set) */ if (le == lmp_host_le_capable(hdev) && simul == lmp_host_le_br_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.le = le; cp.simul = simul; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_powered_update_adv_sync(struct hci_dev *hdev) { struct adv_info *adv, *tmp; int err; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return 0; /* If RPA Resolution has not been enable yet it means the * resolving list is empty and we should attempt to program the * local IRK in order to support using own_addr_type * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). 
*/ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { hci_le_add_resolve_list_sync(hdev, NULL); hci_le_set_addr_resolution_enable_sync(hdev, 0x01); } /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) { err = hci_setup_ext_adv_instance_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } else { err = hci_update_adv_data_sync(hdev, 0x00); if (!err) hci_update_scan_rsp_data_sync(hdev, 0x00); } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hci_enable_advertising_sync(hdev); } /* Call for each tracked instance to be scheduled */ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) hci_schedule_adv_instance_sync(hdev, adv->instance, true); return 0; } static int hci_write_auth_enable_sync(struct hci_dev *hdev) { u8 link_sec; link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec, HCI_CMD_TIMEOUT); } int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) { struct hci_cp_write_page_scan_activity cp; u8 type; int err = 0; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; memset(&cp, 0, sizeof(cp)); if (enable) { type = PAGE_SCAN_TYPE_INTERLACED; /* 160 msec page scan interval */ cp.interval = cpu_to_le16(0x0100); } else { type = hdev->def_page_scan_type; cp.interval = cpu_to_le16(hdev->def_page_scan_int); } cp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || __cpu_to_le16(hdev->page_scan_window) != cp.window) { err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; } if (hdev->page_scan_type != type) err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, sizeof(type), &type, HCI_CMD_TIMEOUT); return err; } static bool disconnected_accept_list_entries(struct hci_dev *hdev) { struct bdaddr_list *b; list_for_each_entry(b, &hdev->accept_list, list) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); if (!conn) return true; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) return true; } return false; } static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) { return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(val), &val, HCI_CMD_TIMEOUT); } int hci_update_scan_sync(struct hci_dev *hdev) { u8 scan; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (!hdev_is_powered(hdev)) return 0; if (mgmt_powering_down(hdev)) return 0; if (hdev->scanning_paused) return 0; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_accept_list_entries(hdev)) scan = SCAN_PAGE; else scan = SCAN_DISABLED; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) return 0; return hci_write_scan_enable_sync(hdev, scan); } int hci_update_name_sync(struct hci_dev *hdev) { struct hci_cp_write_local_name cp; memset(&cp, 0, sizeof(cp)); memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* This function perform 
powered update HCI command sequence after the HCI init * sequence which end up resetting all states, the sequence is as follows: * * HCI_SSP_ENABLED(Enable SSP) * HCI_LE_ENABLED(Enable LE) * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> * Update adv data) * Enable Authentication * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> * Set Name -> Set EIR) * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) */ int hci_powered_update_sync(struct hci_dev *hdev) { int err; /* Register the available SMP channels (BR/EDR and LE) only when * successfully powering on the controller. This late * registration is required so that LE SMP can clearly decide if * the public address or static address is used. */ smp_register(hdev); err = hci_write_ssp_mode_sync(hdev, 0x01); if (err) return err; err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); if (err) return err; err = hci_powered_update_adv_sync(hdev); if (err) return err; err = hci_write_auth_enable_sync(hdev); if (err) return err; if (lmp_bredr_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) hci_write_fast_connectable_sync(hdev, true); else hci_write_fast_connectable_sync(hdev, false); hci_update_scan_sync(hdev); hci_update_class_sync(hdev); hci_update_name_sync(hdev); hci_update_eir_sync(hdev); } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || (!bacmp(&hdev->bdaddr, BDADDR_ANY) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { if (bacmp(&hdev->static_addr, BDADDR_ANY)) return hci_set_random_addr_sync(hdev, &hdev->static_addr); } return 0; } /** * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address * (BD_ADDR) for a HCI device from * a firmware node property. * @hdev: The HCI device * * Search the firmware node for 'local-bd-address'. * * All-zero BD addresses are rejected, because those could be properties * that exist in the firmware tables, but were not updated by the firmware. For * example, the DTS could define 'local-bd-address', with zero BD addresses. 
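 *
 * Purely as an illustration (the node label and address below are made up,
 * not taken from any in-tree DTS), a usable property could look like the
 * following fragment, with the bytes in little-endian order so that the
 * public address 11:22:33:44:55:66 is written as:
 *
 *	&bluetooth0 {
 *		local-bd-address = [ 66 55 44 33 22 11 ];
 *	};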
*/ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) { struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); bdaddr_t ba; int ret; ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", (u8 *)&ba, sizeof(ba)); if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) return; bacpy(&hdev->public_addr, &ba); } struct hci_init_stage { int (*func)(struct hci_dev *hdev); }; /* Run init stage NULL terminated function table */ static int hci_init_stage_sync(struct hci_dev *hdev, const struct hci_init_stage *stage) { size_t i; for (i = 0; stage[i].func; i++) { int err; err = stage[i].func(hdev); if (err) return err; } return 0; } /* Read Local Version */ static int hci_read_local_version_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_CMD_TIMEOUT); } /* Read BD Address */ static int hci_read_bd_addr_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_CMD_TIMEOUT); } #define HCI_INIT(_func) \ { \ .func = _func, \ } static const struct hci_init_stage hci_init0[] = { /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; int hci_reset_sync(struct hci_dev *hdev) { int err; set_bit(HCI_RESET, &hdev->flags); err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); if (err) return err; return 0; } static int hci_init0_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } return hci_init_stage_sync(hdev, hci_init0); } static int hci_unconf_init_sync(struct hci_dev *hdev) { int err; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return 0; err = hci_init0_sync(hdev); if (err < 0) return err; if (hci_dev_test_flag(hdev, HCI_SETUP)) hci_debugfs_create_basic(hdev); return 0; } /* Read Local Supported Features. */ static int hci_read_local_features_sync(struct hci_dev *hdev) { /* Not all AMP controllers support this command */ if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* BR Controller init stage 1 command sequence */ static const struct hci_init_stage br_init1[] = { /* HCI_OP_READ_LOCAL_FEATURES */ HCI_INIT(hci_read_local_features_sync), /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_BD_ADDR */ HCI_INIT(hci_read_bd_addr_sync), {} }; /* Read Local Commands */ static int hci_read_local_cmds_sync(struct hci_dev *hdev) { /* All Bluetooth 1.2 and later controllers should support the * HCI command for reading the local supported commands. * * Unfortunately some controllers indicate Bluetooth 1.2 support, * but do not have support for this command. If that is the case, * the driver can quirk the behavior and skip reading the local * supported commands. 
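 *
 * As a sketch of how a driver typically opts out (illustrative only, not
 * code from this file), the quirk is set on the hci_dev before the init
 * sequence runs:
 *
 *	set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);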
*/ if (hdev->hci_ver > BLUETOOTH_VER_1_1 && !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL, HCI_CMD_TIMEOUT); return 0; } /* Read Local AMP Info */ static int hci_read_local_amp_info_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Data Blk size */ static int hci_read_data_block_size_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Flow Control Mode */ static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Location Data */ static int hci_read_location_data_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, 0, NULL, HCI_CMD_TIMEOUT); } /* AMP Controller init stage 1 command sequence */ static const struct hci_init_stage amp_init1[] = { /* HCI_OP_READ_LOCAL_VERSION */ HCI_INIT(hci_read_local_version_sync), /* HCI_OP_READ_LOCAL_COMMANDS */ HCI_INIT(hci_read_local_cmds_sync), /* HCI_OP_READ_LOCAL_AMP_INFO */ HCI_INIT(hci_read_local_amp_info_sync), /* HCI_OP_READ_DATA_BLOCK_SIZE */ HCI_INIT(hci_read_data_block_size_sync), /* HCI_OP_READ_FLOW_CONTROL_MODE */ HCI_INIT(hci_read_flow_control_mode_sync), /* HCI_OP_READ_LOCATION_DATA */ HCI_INIT(hci_read_location_data_sync), {} }; static int hci_init1_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); /* Reset */ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { err = hci_reset_sync(hdev); if (err) return err; } switch (hdev->dev_type) { case HCI_PRIMARY: hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; return hci_init_stage_sync(hdev, br_init1); case HCI_AMP: hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; return hci_init_stage_sync(hdev, amp_init1); default: bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); break; } return 0; } /* AMP Controller init stage 2 command sequence */ static const struct hci_init_stage amp_init2[] = { /* HCI_OP_READ_LOCAL_FEATURES */ HCI_INIT(hci_read_local_features_sync), {} }; /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ static int hci_read_buffer_size_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Class of Device */ static int hci_read_dev_class_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Local Name */ static int hci_read_local_name_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Number of Supported IAC */ static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL, HCI_CMD_TIMEOUT); } /* Read Current IAC LAP */ static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, u8 cond_type, bdaddr_t *bdaddr, u8 auto_accept) { struct hci_cp_set_event_filter cp; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); cp.flt_type = flt_type; if (flt_type != HCI_FLT_CLEAR_ALL) { cp.cond_type = cond_type; bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); cp.addr_conn_flt.auto_accept = auto_accept; } return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, flt_type == HCI_FLT_CLEAR_ALL ? sizeof(cp.flt_type) : sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_clear_event_filter_sync(struct hci_dev *hdev) { if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) return 0; /* In theory the state machine should not reach here unless * a hci_set_event_filter_sync() call succeeds, but we do * the check both for parity and as a future reminder. */ if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, BDADDR_ANY, 0x00); } /* Connection accept timeout ~20 secs */ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) { __le16 param = cpu_to_le16(0x7d00); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, sizeof(param), &param, HCI_CMD_TIMEOUT); } /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ HCI_INIT(hci_read_buffer_size_sync), /* HCI_OP_READ_CLASS_OF_DEV */ HCI_INIT(hci_read_dev_class_sync), /* HCI_OP_READ_LOCAL_NAME */ HCI_INIT(hci_read_local_name_sync), /* HCI_OP_READ_VOICE_SETTING */ HCI_INIT(hci_read_voice_setting_sync), /* HCI_OP_READ_NUM_SUPPORTED_IAC */ HCI_INIT(hci_read_num_supported_iac_sync), /* HCI_OP_READ_CURRENT_IAC_LAP */ HCI_INIT(hci_read_current_iac_lap_sync), /* HCI_OP_SET_EVENT_FLT */ HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), {} }; static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) { u8 mode = 0x01; if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; /* When SSP is available, then the host features page * should also be available as well. However some * controllers list the max_page as 0 as long as SSP * has not been enabled. To achieve proper debugging * output, force the minimum max_page to 1 at least. 
*/ hdev->max_page = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_write_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return 0; memset(hdev->eir, 0, sizeof(hdev->eir)); memset(&cp, 0, sizeof(cp)); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) { u8 mode; if (!lmp_inq_rssi_capable(hdev) && !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) return 0; /* If Extended Inquiry Result events are supported, then * they are clearly preferred over Inquiry Result with RSSI * events. */ mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, sizeof(mode), &mode, HCI_CMD_TIMEOUT); } static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) { if (!lmp_inq_tx_pwr_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) { struct hci_cp_read_local_ext_features cp; if (!lmp_ext_feat_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); cp.page = page; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) { return hci_read_local_ext_features_sync(hdev, 0x01); } /* HCI Controller init stage 2 command sequence */ static const struct hci_init_stage hci_init2[] = { /* HCI_OP_READ_LOCAL_COMMANDS */ HCI_INIT(hci_read_local_cmds_sync), /* HCI_OP_WRITE_SSP_MODE */ HCI_INIT(hci_write_ssp_mode_1_sync), /* HCI_OP_WRITE_EIR */ HCI_INIT(hci_write_eir_sync), /* HCI_OP_WRITE_INQUIRY_MODE */ HCI_INIT(hci_write_inquiry_mode_sync), /* HCI_OP_READ_INQ_RSP_TX_POWER */ HCI_INIT(hci_read_inq_rsp_tx_power_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_1_sync), /* HCI_OP_WRITE_AUTH_ENABLE */ HCI_INIT(hci_write_auth_enable_sync), {} }; /* Read LE Buffer Size */ static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) { /* Use Read LE Buffer Size V2 if supported */ if (iso_capable(hdev) && hdev->commands[41] & 0x20) return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE_V2, 0, NULL, HCI_CMD_TIMEOUT); return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Local Supported Features */ static int hci_le_read_local_features_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Supported States */ static int hci_le_read_supported_states_sync(struct hci_dev *hdev) { return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL, HCI_CMD_TIMEOUT); } /* LE Controller init stage 2 command sequence */ static const struct hci_init_stage le_init2[] = { /* HCI_OP_LE_READ_LOCAL_FEATURES */ HCI_INIT(hci_le_read_local_features_sync), /* HCI_OP_LE_READ_BUFFER_SIZE */ HCI_INIT(hci_le_read_buffer_size_sync), /* HCI_OP_LE_READ_SUPPORTED_STATES */ HCI_INIT(hci_le_read_supported_states_sync), {} }; static int hci_init2_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); if (hdev->dev_type == HCI_AMP) return hci_init_stage_sync(hdev, amp_init2); err = hci_init_stage_sync(hdev, hci_init2); if (err) return err; if (lmp_bredr_capable(hdev)) { err = hci_init_stage_sync(hdev, br_init2); if (err) 
return err; } else { hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); } if (lmp_le_capable(hdev)) { err = hci_init_stage_sync(hdev, le_init2); if (err) return err; /* LE-only controllers have LE implicitly enabled */ if (!lmp_bredr_capable(hdev)) hci_dev_set_flag(hdev, HCI_LE_ENABLED); } return 0; } static int hci_set_event_mask_sync(struct hci_dev *hdev) { /* The second byte is 0xff instead of 0x9f (two reserved bits * disabled) since a Broadcom 1.2 dongle doesn't respond to the * command otherwise. */ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; /* CSR 1.1 dongles does not accept any bitfield so don't try to set * any event mask for pre 1.2 devices. */ if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; if (lmp_bredr_capable(hdev)) { events[4] |= 0x01; /* Flow Specification Complete */ /* Don't set Disconnect Complete when suspended as that * would wakeup the host when disconnecting due to * suspend. */ if (hdev->suspended) events[0] &= 0xef; } else { /* Use a different default for LE-only devices */ memset(events, 0, sizeof(events)); events[1] |= 0x20; /* Command Complete */ events[1] |= 0x40; /* Command Status */ events[1] |= 0x80; /* Hardware Error */ /* If the controller supports the Disconnect command, enable * the corresponding event. In addition enable packet flow * control related events. */ if (hdev->commands[0] & 0x20) { /* Don't set Disconnect Complete when suspended as that * would wakeup the host when disconnecting due to * suspend. */ if (!hdev->suspended) events[0] |= 0x10; /* Disconnection Complete */ events[2] |= 0x04; /* Number of Completed Packets */ events[3] |= 0x02; /* Data Buffer Overflow */ } /* If the controller supports the Read Remote Version * Information command, enable the corresponding event. */ if (hdev->commands[2] & 0x80) events[1] |= 0x08; /* Read Remote Version Information * Complete */ if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { events[0] |= 0x80; /* Encryption Change */ events[5] |= 0x80; /* Encryption Key Refresh Complete */ } } if (lmp_inq_rssi_capable(hdev) || test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) events[4] |= 0x02; /* Inquiry Result with RSSI */ if (lmp_ext_feat_capable(hdev)) events[4] |= 0x04; /* Read Remote Extended Features Complete */ if (lmp_esco_capable(hdev)) { events[5] |= 0x08; /* Synchronous Connection Complete */ events[5] |= 0x10; /* Synchronous Connection Changed */ } if (lmp_sniffsubr_capable(hdev)) events[5] |= 0x20; /* Sniff Subrating */ if (lmp_pause_enc_capable(hdev)) events[5] |= 0x80; /* Encryption Key Refresh Complete */ if (lmp_ext_inq_capable(hdev)) events[5] |= 0x40; /* Extended Inquiry Result */ if (lmp_no_flush_capable(hdev)) events[7] |= 0x01; /* Enhanced Flush Complete */ if (lmp_lsto_capable(hdev)) events[6] |= 0x80; /* Link Supervision Timeout Changed */ if (lmp_ssp_capable(hdev)) { events[6] |= 0x01; /* IO Capability Request */ events[6] |= 0x02; /* IO Capability Response */ events[6] |= 0x04; /* User Confirmation Request */ events[6] |= 0x08; /* User Passkey Request */ events[6] |= 0x10; /* Remote OOB Data Request */ events[6] |= 0x20; /* Simple Pairing Complete */ events[7] |= 0x04; /* User Passkey Notification */ events[7] |= 0x08; /* Keypress Notification */ events[7] |= 0x10; /* Remote Host Supported * Features Notification */ } if (lmp_le_capable(hdev)) events[7] |= 0x20; /* LE Meta-Event */ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } static int hci_read_stored_link_key_sync(struct hci_dev *hdev) { struct 
hci_cp_read_stored_link_key cp; if (!(hdev->commands[6] & 0x20) || test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, BDADDR_ANY); cp.read_all = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_setup_link_policy_sync(struct hci_dev *hdev) { struct hci_cp_write_def_link_policy cp; u16 link_policy = 0; if (!(hdev->commands[5] & 0x10)) return 0; memset(&cp, 0, sizeof(cp)); if (lmp_rswitch_capable(hdev)) link_policy |= HCI_LP_RSWITCH; if (lmp_hold_capable(hdev)) link_policy |= HCI_LP_HOLD; if (lmp_sniff_capable(hdev)) link_policy |= HCI_LP_SNIFF; if (lmp_park_capable(hdev)) link_policy |= HCI_LP_PARK; cp.policy = cpu_to_le16(link_policy); return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) { if (!(hdev->commands[8] & 0x01)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) { if (!(hdev->commands[18] & 0x04) || !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_read_page_scan_type_sync(struct hci_dev *hdev) { /* Some older Broadcom based Bluetooth 1.2 controllers do not * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ if (!(hdev->commands[13] & 0x01)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL, HCI_CMD_TIMEOUT); } /* Read features beyond page 1 if available */ static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) { u8 page; int err; if (!lmp_ext_feat_capable(hdev)) return 0; for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; page++) { err = hci_read_local_ext_features_sync(hdev, page); if (err) return err; } return 0; } /* HCI Controller init stage 3 command sequence */ static const struct hci_init_stage hci_init3[] = { /* HCI_OP_SET_EVENT_MASK */ HCI_INIT(hci_set_event_mask_sync), /* HCI_OP_READ_STORED_LINK_KEY */ HCI_INIT(hci_read_stored_link_key_sync), /* HCI_OP_WRITE_DEF_LINK_POLICY */ HCI_INIT(hci_setup_link_policy_sync), /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ HCI_INIT(hci_read_page_scan_activity_sync), /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ HCI_INIT(hci_read_def_err_data_reporting_sync), /* HCI_OP_READ_PAGE_SCAN_TYPE */ HCI_INIT(hci_read_page_scan_type_sync), /* HCI_OP_READ_LOCAL_EXT_FEATURES */ HCI_INIT(hci_read_local_ext_features_all_sync), {} }; static int hci_le_set_event_mask_sync(struct hci_dev *hdev) { u8 events[8]; if (!lmp_le_capable(hdev)) return 0; memset(events, 0, sizeof(events)); if (hdev->le_features[0] & HCI_LE_ENCRYPTION) events[0] |= 0x10; /* LE Long Term Key Request */ /* If controller supports the Connection Parameters Request * Link Layer Procedure, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) /* LE Remote Connection Parameter Request */ events[0] |= 0x20; /* If the controller supports the Data Length Extension * feature, enable the corresponding event. 
*/ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) events[0] |= 0x40; /* LE Data Length Change */ /* If the controller supports LL Privacy feature or LE Extended Adv, * enable the corresponding event. */ if (use_enhanced_conn_complete(hdev)) events[1] |= 0x02; /* LE Enhanced Connection Complete */ /* If the controller supports Extended Scanner Filter * Policies, enable the corresponding event. */ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) events[1] |= 0x04; /* LE Direct Advertising Report */ /* If the controller supports Channel Selection Algorithm #2 * feature, enable the corresponding event. */ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) events[2] |= 0x08; /* LE Channel Selection Algorithm */ /* If the controller supports the LE Set Scan Enable command, * enable the corresponding advertising report event. */ if (hdev->commands[26] & 0x08) events[0] |= 0x02; /* LE Advertising Report */ /* If the controller supports the LE Create Connection * command, enable the corresponding event. */ if (hdev->commands[26] & 0x10) events[0] |= 0x01; /* LE Connection Complete */ /* If the controller supports the LE Connection Update * command, enable the corresponding event. */ if (hdev->commands[27] & 0x04) events[0] |= 0x04; /* LE Connection Update Complete */ /* If the controller supports the LE Read Remote Used Features * command, enable the corresponding event. */ if (hdev->commands[27] & 0x20) /* LE Read Remote Used Features Complete */ events[0] |= 0x08; /* If the controller supports the LE Read Local P-256 * Public Key command, enable the corresponding event. */ if (hdev->commands[34] & 0x02) /* LE Read Local P-256 Public Key Complete */ events[0] |= 0x80; /* If the controller supports the LE Generate DHKey * command, enable the corresponding event. */ if (hdev->commands[34] & 0x04) events[1] |= 0x01; /* LE Generate DHKey Complete */ /* If the controller supports the LE Set Default PHY or * LE Set PHY commands, enable the corresponding event. */ if (hdev->commands[35] & (0x20 | 0x40)) events[1] |= 0x08; /* LE PHY Update Complete */ /* If the controller supports LE Set Extended Scan Parameters * and LE Set Extended Scan Enable commands, enable the * corresponding event. */ if (use_ext_scan(hdev)) events[1] |= 0x10; /* LE Extended Advertising Report */ /* If the controller supports the LE Extended Advertising * command, enable the corresponding event. */ if (ext_adv_capable(hdev)) events[2] |= 0x02; /* LE Advertising Set Terminated */ if (cis_capable(hdev)) { events[3] |= 0x01; /* LE CIS Established */ if (cis_peripheral_capable(hdev)) events[3] |= 0x02; /* LE CIS Request */ } if (bis_capable(hdev)) { events[1] |= 0x20; /* LE PA Report */ events[1] |= 0x40; /* LE PA Sync Established */ events[3] |= 0x04; /* LE Create BIG Complete */ events[3] |= 0x08; /* LE Terminate BIG Complete */ events[3] |= 0x10; /* LE BIG Sync Established */ events[3] |= 0x20; /* LE BIG Sync Loss */ events[4] |= 0x02; /* LE BIG Info Advertising Report */ } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), events, HCI_CMD_TIMEOUT); } /* Read LE Advertising Channel TX Power */ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) { if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { /* HCI TS spec forbids mixing of legacy and extended * advertising commands wherein READ_ADV_TX_POWER is * also included. So do not call it if extended adv * is supported otherwise controller will return * COMMAND_DISALLOWED for extended commands. 
*/ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL, HCI_CMD_TIMEOUT); } return 0; } /* Read LE Min/Max Tx Power*/ static int hci_le_read_tx_power_sync(struct hci_dev *hdev) { if (!(hdev->commands[38] & 0x80) || test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Accept List Size */ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Clear LE Accept List */ static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[26] & 0x80)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Resolving List Size */ static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x40)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 0, NULL, HCI_CMD_TIMEOUT); } /* Clear LE Resolving List */ static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) { if (!(hdev->commands[34] & 0x20)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, HCI_CMD_TIMEOUT); } /* Set RPA timeout */ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); if (!(hdev->commands[35] & 0x04) || test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, sizeof(timeout), &timeout, HCI_CMD_TIMEOUT); } /* Read LE Maximum Data Length */ static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Suggested Default Data Length */ static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) { if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, HCI_CMD_TIMEOUT); } /* Read LE Number of Supported Advertising Sets */ static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) { if (!ext_adv_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 0, NULL, HCI_CMD_TIMEOUT); } /* Write LE Host Supported */ static int hci_set_le_support_sync(struct hci_dev *hdev) { struct hci_cp_write_le_host_supported cp; /* LE-only devices do not support explicit enablement */ if (!lmp_bredr_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { cp.le = 0x01; cp.simul = 0x00; } if (cp.le == lmp_host_le_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Set Host Feature */ static int hci_le_set_host_feature_sync(struct hci_dev *hdev) { struct hci_cp_le_set_host_feature cp; if (!iso_capable(hdev)) return 0; memset(&cp, 0, sizeof(cp)); /* Isochronous Channels (Host Support) */ cp.bit_number = 32; cp.bit_value = 1; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* LE Controller init stage 3 command sequence */ static const struct hci_init_stage le_init3[] = { /* HCI_OP_LE_SET_EVENT_MASK */ HCI_INIT(hci_le_set_event_mask_sync), /* HCI_OP_LE_READ_ADV_TX_POWER */ 
HCI_INIT(hci_le_read_adv_tx_power_sync), /* HCI_OP_LE_READ_TRANSMIT_POWER */ HCI_INIT(hci_le_read_tx_power_sync), /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ HCI_INIT(hci_le_read_accept_list_size_sync), /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ HCI_INIT(hci_le_clear_accept_list_sync), /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ HCI_INIT(hci_le_read_resolv_list_size_sync), /* HCI_OP_LE_CLEAR_RESOLV_LIST */ HCI_INIT(hci_le_clear_resolv_list_sync), /* HCI_OP_LE_SET_RPA_TIMEOUT */ HCI_INIT(hci_le_set_rpa_timeout_sync), /* HCI_OP_LE_READ_MAX_DATA_LEN */ HCI_INIT(hci_le_read_max_data_len_sync), /* HCI_OP_LE_READ_DEF_DATA_LEN */ HCI_INIT(hci_le_read_def_data_len_sync), /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ HCI_INIT(hci_le_read_num_support_adv_sets_sync), /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ HCI_INIT(hci_set_le_support_sync), /* HCI_OP_LE_SET_HOST_FEATURE */ HCI_INIT(hci_le_set_host_feature_sync), {} }; static int hci_init3_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init3); if (err) return err; if (lmp_le_capable(hdev)) return hci_init_stage_sync(hdev, le_init3); return 0; } static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) { struct hci_cp_delete_stored_link_key cp; /* Some Broadcom based Bluetooth controllers do not support the * Delete Stored Link Key command. They are clearly indicating its * absence in the bit mask of supported commands. * * Check the supported commands and only if the command is marked * as supported send it. If not supported assume that the controller * does not have actual support for stored link keys which makes this * command redundant anyway. * * Some controllers indicate that they support handling deleting * stored link keys, but they don't. The quirk lets a driver * just disable this command. */ if (!(hdev->commands[6] & 0x80) || test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) return 0; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, BDADDR_ANY); cp.delete_all = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) { u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; bool changed = false; /* Set event mask page 2 if the HCI command for it is supported */ if (!(hdev->commands[22] & 0x04)) return 0; /* If Connectionless Peripheral Broadcast central role is supported * enable all necessary events for it. */ if (lmp_cpb_central_capable(hdev)) { events[1] |= 0x40; /* Triggered Clock Capture */ events[1] |= 0x80; /* Synchronization Train Complete */ events[2] |= 0x08; /* Truncated Page Complete */ events[2] |= 0x20; /* CPB Channel Map Change */ changed = true; } /* If Connectionless Peripheral Broadcast peripheral role is supported * enable all necessary events for it. */ if (lmp_cpb_peripheral_capable(hdev)) { events[2] |= 0x01; /* Synchronization Train Received */ events[2] |= 0x02; /* CPB Receive */ events[2] |= 0x04; /* CPB Timeout */ events[2] |= 0x10; /* Peripheral Page Response Timeout */ changed = true; } /* Enable Authenticated Payload Timeout Expired event if supported */ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { events[2] |= 0x80; changed = true; } /* Some Broadcom based controllers indicate support for Set Event * Mask Page 2 command, but then actually do not support it. Since * the default value is all bits set to zero, the command is only * required if the event mask has to be changed. 
In case no change * to the event mask is needed, skip this command. */ if (!changed) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events, HCI_CMD_TIMEOUT); } /* Read local codec list if the HCI command is supported */ static int hci_read_local_codecs_sync(struct hci_dev *hdev) { if (hdev->commands[45] & 0x04) hci_read_supported_codecs_v2(hdev); else if (hdev->commands[29] & 0x20) hci_read_supported_codecs(hdev); return 0; } /* Read local pairing options if the HCI command is supported */ static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) { if (!(hdev->commands[41] & 0x08)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL, HCI_CMD_TIMEOUT); } /* Get MWS transport configuration if the HCI command is supported */ static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) { if (!mws_transport_config_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL, HCI_CMD_TIMEOUT); } /* Check for Synchronization Train support */ static int hci_read_sync_train_params_sync(struct hci_dev *hdev) { if (!lmp_sync_train_capable(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL, HCI_CMD_TIMEOUT); } /* Enable Secure Connections if supported and configured */ static int hci_write_sc_support_1_sync(struct hci_dev *hdev) { u8 support = 0x01; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || !bredr_sc_enabled(hdev)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, sizeof(support), &support, HCI_CMD_TIMEOUT); } /* Set erroneous data reporting if supported to the wideband speech * setting value */ static int hci_set_err_data_report_sync(struct hci_dev *hdev) { struct hci_cp_write_def_err_data_reporting cp; bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); if (!(hdev->commands[18] & 0x08) || !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) return 0; if (enabled == hdev->err_data_reporting) return 0; memset(&cp, 0, sizeof(cp)); cp.err_data_reporting = enabled ? 
ERR_DATA_REPORTING_ENABLED : ERR_DATA_REPORTING_DISABLED; return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static const struct hci_init_stage hci_init4[] = { /* HCI_OP_DELETE_STORED_LINK_KEY */ HCI_INIT(hci_delete_stored_link_key_sync), /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ HCI_INIT(hci_set_event_mask_page_2_sync), /* HCI_OP_READ_LOCAL_CODECS */ HCI_INIT(hci_read_local_codecs_sync), /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ HCI_INIT(hci_read_local_pairing_opts_sync), /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ HCI_INIT(hci_get_mws_transport_config_sync), /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ HCI_INIT(hci_read_sync_train_params_sync), /* HCI_OP_WRITE_SC_SUPPORT */ HCI_INIT(hci_write_sc_support_1_sync), /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ HCI_INIT(hci_set_err_data_report_sync), {} }; /* Set Suggested Default Data Length to maximum if supported */ static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) { struct hci_cp_le_write_def_data_len cp; if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) return 0; memset(&cp, 0, sizeof(cp)); cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } /* Set Default PHY parameters if command is supported, enables all supported * PHYs according to the LE Features bits. */ static int hci_le_set_default_phy_sync(struct hci_dev *hdev) { struct hci_cp_le_set_default_phy cp; if (!(hdev->commands[35] & 0x20)) { /* If the command is not supported it means only 1M PHY is * supported. */ hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; return 0; } memset(&cp, 0, sizeof(cp)); cp.all_phys = 0x00; cp.tx_phys = HCI_LE_SET_PHY_1M; cp.rx_phys = HCI_LE_SET_PHY_1M; /* Enables 2M PHY if supported */ if (le_2m_capable(hdev)) { cp.tx_phys |= HCI_LE_SET_PHY_2M; cp.rx_phys |= HCI_LE_SET_PHY_2M; } /* Enables Coded PHY if supported */ if (le_coded_capable(hdev)) { cp.tx_phys |= HCI_LE_SET_PHY_CODED; cp.rx_phys |= HCI_LE_SET_PHY_CODED; } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static const struct hci_init_stage le_init4[] = { /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ HCI_INIT(hci_le_set_write_def_data_len_sync), /* HCI_OP_LE_SET_DEFAULT_PHY */ HCI_INIT(hci_le_set_default_phy_sync), {} }; static int hci_init4_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_init_stage_sync(hdev, hci_init4); if (err) return err; if (lmp_le_capable(hdev)) return hci_init_stage_sync(hdev, le_init4); return 0; } static int hci_init_sync(struct hci_dev *hdev) { int err; err = hci_init1_sync(hdev); if (err < 0) return err; if (hci_dev_test_flag(hdev, HCI_SETUP)) hci_debugfs_create_basic(hdev); err = hci_init2_sync(hdev); if (err < 0) return err; /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode * BR/EDR/LE type controllers. AMP controllers only need the * first two stages of init. */ if (hdev->dev_type != HCI_PRIMARY) return 0; err = hci_init3_sync(hdev); if (err < 0) return err; err = hci_init4_sync(hdev); if (err < 0) return err; /* This function is only called when the controller is actually in * configured state. When the controller is marked as unconfigured, * this initialization procedure is not run. * * It means that it is possible that a controller runs through its * setup phase and then discovers missing settings. If that is the * case, then this function will not be called. 
It then will only * be called during the config phase. * * So only when in setup phase or config phase, create the debugfs * entries and register the SMP channels. */ if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) return 0; hci_debugfs_create_common(hdev); if (lmp_bredr_capable(hdev)) hci_debugfs_create_bredr(hdev); if (lmp_le_capable(hdev)) hci_debugfs_create_le(hdev); return 0; } #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } static const struct { unsigned long quirk; const char *desc; } hci_broken_table[] = { HCI_QUIRK_BROKEN(LOCAL_COMMANDS, "HCI Read Local Supported Commands not supported"), HCI_QUIRK_BROKEN(STORED_LINK_KEY, "HCI Delete Stored Link Key command is advertised, " "but not supported."), HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, "HCI Read Default Erroneous Data Reporting command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, "HCI Read Transmit Power Level command is advertised, " "but not supported."), HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, "HCI Set Event Filter command not supported."), HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, "HCI Enhanced Setup Synchronous Connection command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, "HCI LE Set Random Private Address Timeout command is " "advertised, but not supported."), HCI_QUIRK_BROKEN(LE_CODED, "HCI LE Coded PHY feature bit is set, " "but its usage is not supported.") }; /* This function handles hdev setup stage: * * Calls hdev->setup * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. */ static int hci_dev_setup_sync(struct hci_dev *hdev) { int ret = 0; bool invalid_bdaddr; size_t i; if (!hci_dev_test_flag(hdev, HCI_SETUP) && !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) return 0; bt_dev_dbg(hdev, ""); hci_sock_dev_event(hdev, HCI_DEV_SETUP); if (hdev->setup) ret = hdev->setup(hdev); for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); } /* The transport driver can set the quirk to mark the * BD_ADDR invalid before creating the HCI device or in * its setup callback. */ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); if (!ret) { if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && !bacmp(&hdev->public_addr, BDADDR_ANY)) hci_dev_get_bd_addr_from_property(hdev); if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && hdev->set_bdaddr) { ret = hdev->set_bdaddr(hdev, &hdev->public_addr); if (!ret) invalid_bdaddr = false; } } /* The transport driver can set these quirks before * creating the HCI device or in its setup callback. * * For the invalid BD_ADDR quirk it is possible that * it becomes a valid address if the bootloader does * provide it (see above). * * In case any of them is set, the controller has to * start up as unconfigured. */ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || invalid_bdaddr) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* For an unconfigured controller it is required to * read at least the version information provided by * the Read Local Version Information command. * * If the set_bdaddr driver callback is provided, then * also the original Bluetooth public device address * will be read using the Read BD Address command. 
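* That reduced init is what hci_unconf_init_sync() below is expected * to run instead of the full init stages.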
*/ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return hci_unconf_init_sync(hdev); return ret; } /* This function handles hdev init stage: * * Calls hci_dev_setup_sync to perform setup stage * Calls hci_init_sync to perform HCI command init sequence */ static int hci_dev_init_sync(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); ret = hci_dev_setup_sync(hdev); if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that * the address gets programmed. If the driver does not * support changing the public address, fail the power * on procedure. */ if (bacmp(&hdev->public_addr, BDADDR_ANY) && hdev->set_bdaddr) ret = hdev->set_bdaddr(hdev, &hdev->public_addr); else ret = -EADDRNOTAVAIL; } if (!ret) { if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = hci_init_sync(hdev); if (!ret && hdev->post_init) ret = hdev->post_init(hdev); } } /* If the HCI Reset command is clearing all diagnostic settings, * then they need to be reprogrammed after the init procedure * completed. */ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) ret = hdev->set_diag(hdev, true); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { msft_do_open(hdev); aosp_do_open(hdev); } clear_bit(HCI_INIT, &hdev->flags); return ret; } int hci_dev_open_sync(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { ret = -ENODEV; goto done; } if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { /* Check for rfkill but allow the HCI setup stage to * proceed (which in itself doesn't cause any RF activity). */ if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { ret = -ERFKILL; goto done; } /* Check for valid public address or a configured static * random address, but let the HCI setup proceed to * be able to determine if there is a public address * or not. * * In case of user channel usage, it is not important * if a public address or static random address is * available. * * This check is only valid for BR/EDR controllers * since AMP controllers do not have an address. 
*/ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hdev->dev_type == HCI_PRIMARY && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY)) { ret = -EADDRNOTAVAIL; goto done; } } if (test_bit(HCI_UP, &hdev->flags)) { ret = -EALREADY; goto done; } if (hdev->open(hdev)) { ret = -EIO; goto done; } hci_devcd_reset(hdev); set_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_OPEN); ret = hci_dev_init_sync(hdev); if (!ret) { hci_dev_hold(hdev); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); set_bit(HCI_UP, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_UP); hci_leds_update_powered(hdev, true); if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG) && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT) && hdev->dev_type == HCI_PRIMARY) { ret = hci_powered_update_sync(hdev); mgmt_power_on(hdev, ret); } } else { /* Init failed, cleanup */ flush_work(&hdev->tx_work); /* Since hci_rx_work() is possible to awake new cmd_work * it should be flushed first to avoid unexpected call of * hci_cmd_work() */ flush_work(&hdev->rx_work); flush_work(&hdev->cmd_work); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->rx_q); if (hdev->flush) hdev->flush(hdev); if (hdev->sent_cmd) { cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); hdev->close(hdev); hdev->flags &= BIT(HCI_RAW); } done: return ret; } /* This function requires the caller holds hdev->lock */ static void hci_pend_le_actions_clear(struct hci_dev *hdev) { struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { hci_pend_le_list_del_init(p); if (p->conn) { hci_conn_drop(p->conn); hci_conn_put(p->conn); p->conn = NULL; } } BT_DBG("All LE pending actions cleared"); } static int hci_dev_shutdown(struct hci_dev *hdev) { int err = 0; /* Similar to how we first do setup and then set the exclusive access * bit for userspace, we must first unset userchannel and then clean up. * Otherwise, the kernel can't properly use the hci channel to clean up * the controller (some shutdown routines require sending additional * commands to the controller for example). 
*/ bool was_userchannel = hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && test_bit(HCI_UP, &hdev->flags)) { /* Execute vendor specific shutdown routine */ if (hdev->shutdown) err = hdev->shutdown(hdev); } if (was_userchannel) hci_dev_set_flag(hdev, HCI_USER_CHANNEL); return err; } int hci_dev_close_sync(struct hci_dev *hdev) { bool auto_off; int err = 0; bt_dev_dbg(hdev, ""); cancel_delayed_work(&hdev->power_off); cancel_delayed_work(&hdev->ncmd_timer); cancel_delayed_work(&hdev->le_scan_disable); cancel_delayed_work(&hdev->le_scan_restart); hci_request_cancel_all(hdev); if (hdev->adv_instance_timeout) { cancel_delayed_work_sync(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } err = hci_dev_shutdown(hdev); if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); return err; } hci_leds_update_powered(hdev, false); /* Flush RX and TX works */ flush_work(&hdev->tx_work); flush_work(&hdev->rx_work); if (hdev->discov_timeout > 0) { hdev->discov_timeout = 0; hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); } if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) cancel_delayed_work(&hdev->service_cache); if (hci_dev_test_flag(hdev, HCI_MGMT)) { struct adv_info *adv_instance; cancel_delayed_work_sync(&hdev->rpa_expired); list_for_each_entry(adv_instance, &hdev->adv_instances, list) cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); } /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ drain_workqueue(hdev->workqueue); hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); if (!auto_off && hdev->dev_type == HCI_PRIMARY && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hci_dev_test_flag(hdev, HCI_MGMT)) __mgmt_power_off(hdev); hci_inquiry_cache_flush(hdev); hci_pend_le_actions_clear(hdev); hci_conn_hash_flush(hdev); /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ smp_unregister(hdev); hci_dev_unlock(hdev); hci_sock_dev_event(hdev, HCI_DEV_DOWN); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { aosp_do_close(hdev); msft_do_close(hdev); } if (hdev->flush) hdev->flush(hdev); /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { set_bit(HCI_INIT, &hdev->flags); hci_reset_sync(hdev); clear_bit(HCI_INIT, &hdev->flags); } /* flush cmd work */ flush_work(&hdev->cmd_work); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->raw_q); /* Drop last sent command */ if (hdev->sent_cmd) { cancel_delayed_work_sync(&hdev->cmd_timer); kfree_skb(hdev->sent_cmd); hdev->sent_cmd = NULL; } clear_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_CLOSE); /* After this point our queues are empty and no tasks are scheduled. 
*/ hdev->close(hdev); /* Clear flags */ hdev->flags &= BIT(HCI_RAW); hci_dev_clear_volatile_flags(hdev); /* Controller radio is available but is currently powered down */ hdev->amp_status = AMP_STATUS_POWERED_DOWN; memset(hdev->eir, 0, sizeof(hdev->eir)); memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); bacpy(&hdev->random_addr, BDADDR_ANY); hci_dev_put(hdev); return err; } /* This function performs the power on HCI command sequence as follows: * * If controller is already up (HCI_UP) performs the hci_powered_update_sync * sequence, otherwise runs hci_dev_open_sync which will follow with * hci_powered_update_sync after the init sequence is completed. */ static int hci_power_on_sync(struct hci_dev *hdev) { int err; if (test_bit(HCI_UP, &hdev->flags) && hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); return hci_powered_update_sync(hdev); } err = hci_dev_open_sync(hdev); if (err < 0) return err; /* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to return the device back off. */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (hdev->dev_type == HCI_PRIMARY && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_close_sync(hdev); } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send the Unconfigured Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be sent. */ mgmt_index_added(hdev); } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* When the controller is now configured, then it * is important to clear the HCI_RAW flag. */ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event.
*/ mgmt_index_added(hdev); } return 0; } static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) { struct hci_cp_remote_name_req_cancel cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, addr); return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_stop_discovery_sync(struct hci_dev *hdev) { struct discovery_state *d = &hdev->discovery; struct inquiry_entry *e; int err; bt_dev_dbg(hdev, "state %u", hdev->discovery.state); if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { if (test_bit(HCI_INQUIRY, &hdev->flags)) { err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); if (err) return err; } if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); cancel_delayed_work(&hdev->le_scan_restart); err = hci_scan_disable_sync(hdev); if (err) return err; } } else { err = hci_scan_disable_sync(hdev); if (err) return err; } /* Resume advertising if it was paused */ if (use_ll_privacy(hdev)) hci_resume_advertising_sync(hdev); /* No further actions needed for LE-only discovery */ if (d->type == DISCOV_TYPE_LE) return 0; if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); if (!e) return 0; return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); } return 0; } static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, u8 reason) { struct hci_cp_disconn_phy_link cp; memset(&cp, 0, sizeof(cp)); cp.phy_handle = HCI_PHY_HANDLE(handle); cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_disconnect cp; if (conn->type == AMP_LINK) return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is * used when suspending or powering off, where we don't want to wait * for the peer's response. */ if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_EV_DISCONN_COMPLETE, HCI_CMD_TIMEOUT, NULL); return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { /* Return reason if scanning since the connection shall probably be * cleanup directly. */ if (test_bit(HCI_CONN_SCANNING, &conn->flags)) return reason; if (conn->role == HCI_ROLE_SLAVE || test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL, HCI_CMD_TIMEOUT); } static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { if (conn->type == LE_LINK) return hci_le_connect_cancel_sync(hdev, conn, reason); if (conn->type == ISO_LINK) { /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 1857: * * If this command is issued for a CIS on the Central and the * CIS is successfully terminated before being established, * then an HCI_LE_CIS_Established event shall also be sent for * this CIS with the Status Operation Cancelled by Host (0x44). 
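* * So when a Create CIS has already been issued for this connection, * it is cancelled below with a regular Disconnect and the resulting * events take care of the cleanup.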
*/ if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) return hci_disconnect_sync(hdev, conn, reason); /* CIS with no Create CIS sent have nothing to cancel */ if (bacmp(&conn->dst, BDADDR_ANY)) return HCI_ERROR_LOCAL_HOST_TERM; /* There is no way to cancel a BIS without terminating the BIG * which is done later on connection cleanup. */ return 0; } if (hdev->hci_ver < BLUETOOTH_VER_1_2) return 0; /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is * used when suspending or powering off, where we don't want to wait * for the peer's response. */ if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst, HCI_EV_CONN_COMPLETE, HCI_CMD_TIMEOUT, NULL); return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst, HCI_CMD_TIMEOUT); } static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_reject_sync_conn_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.reason = reason; /* SCO rejection has its own limited set of * allowed error values (0x0D-0x0F). */ if (reason < 0x0d || reason > 0x0f) cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_le_reject_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { struct hci_cp_reject_conn_req cp; if (conn->type == ISO_LINK) return hci_le_reject_cis_sync(hdev, conn, reason); if (conn->type == SCO_LINK || conn->type == ESCO_LINK) return hci_reject_sco_sync(hdev, conn, reason); memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.reason = reason; return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) { int err = 0; u16 handle = conn->handle; struct hci_conn *c; switch (conn->state) { case BT_CONNECTED: case BT_CONFIG: err = hci_disconnect_sync(hdev, conn, reason); break; case BT_CONNECT: err = hci_connect_cancel_sync(hdev, conn, reason); break; case BT_CONNECT2: err = hci_reject_conn_sync(hdev, conn, reason); break; case BT_OPEN: hci_dev_lock(hdev); /* Cleanup bis or pa sync connections */ if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) || test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) { hci_conn_failed(conn, reason); } else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) || test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) { conn->state = BT_CLOSED; hci_disconn_cfm(conn, reason); hci_conn_del(conn); } hci_dev_unlock(hdev); return 0; case BT_BOUND: hci_dev_lock(hdev); hci_conn_failed(conn, reason); hci_dev_unlock(hdev); return 0; default: hci_dev_lock(hdev); conn->state = BT_CLOSED; hci_disconn_cfm(conn, reason); hci_conn_del(conn); hci_dev_unlock(hdev); return 0; } hci_dev_lock(hdev); /* Check if the connection hasn't been cleanup while waiting * commands to complete. 
*/ c = hci_conn_hash_lookup_handle(hdev, handle); if (!c || c != conn) { err = 0; goto unlock; } /* Clean up the hci_conn object if it cannot be cancelled as it * likely means the controller and host stack are out of sync * or in case of LE it was still scanning so it can be cleaned up * safely. */ hci_conn_failed(conn, reason); unlock: hci_dev_unlock(hdev); return err; } static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) { struct list_head *head = &hdev->conn_hash.list; struct hci_conn *conn; rcu_read_lock(); while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { /* Make sure the connection is not freed while unlocking */ conn = hci_conn_get(conn); rcu_read_unlock(); /* Disregard possible errors since hci_conn_del shall have been * called even in case errors occurred, since it would * then cause hci_conn_failed to be called which calls * hci_conn_del internally. */ hci_abort_conn_sync(hdev, conn, reason); hci_conn_put(conn); rcu_read_lock(); } rcu_read_unlock(); return 0; } /* This function performs the power off HCI command sequence as follows: * * Clear Advertising * Stop Discovery * Disconnect all connections * hci_dev_close_sync */ static int hci_power_off_sync(struct hci_dev *hdev) { int err; /* If controller is already down there is nothing to do */ if (!test_bit(HCI_UP, &hdev->flags)) return 0; if (test_bit(HCI_ISCAN, &hdev->flags) || test_bit(HCI_PSCAN, &hdev->flags)) { err = hci_write_scan_enable_sync(hdev, 0x00); if (err) return err; } err = hci_clear_adv_sync(hdev, NULL, false); if (err) return err; err = hci_stop_discovery_sync(hdev); if (err) return err; /* Terminated due to Power Off */ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); if (err) return err; return hci_dev_close_sync(hdev); } int hci_set_powered_sync(struct hci_dev *hdev, u8 val) { if (val) return hci_power_on_sync(hdev); return hci_power_off_sync(hdev); } static int hci_write_iac_sync(struct hci_dev *hdev) { struct hci_cp_write_current_iac_lap cp; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return 0; memset(&cp, 0, sizeof(cp)); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { /* Limited discoverable mode */ cp.num_iac = min_t(u8, hdev->num_iac, 2); cp.iac_lap[0] = 0x00; /* LIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; cp.iac_lap[3] = 0x33; /* GIAC */ cp.iac_lap[4] = 0x8b; cp.iac_lap[5] = 0x9e; } else { /* General discoverable mode */ cp.num_iac = 1; cp.iac_lap[0] = 0x33; /* GIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; } return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, (cp.num_iac * 3) + 1, &cp, HCI_CMD_TIMEOUT); } int hci_update_discoverable_sync(struct hci_dev *hdev) { int err = 0; if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = hci_write_iac_sync(hdev); if (err) return err; err = hci_update_scan_sync(hdev); if (err) return err; err = hci_update_class_sync(hdev); if (err) return err; } /* Advertising instances don't use the global discoverable setting, so * only update AD if advertising was enabled using Set Advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { err = hci_update_adv_data_sync(hdev, 0x00); if (err) return err; /* Discoverable mode affects the local advertising * address in limited privacy mode.
*/ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { if (ext_adv_capable(hdev)) err = hci_start_ext_adv_sync(hdev, 0x00); else err = hci_enable_advertising_sync(hdev); } } return err; } static int update_discoverable_sync(struct hci_dev *hdev, void *data) { return hci_update_discoverable_sync(hdev); } int hci_update_discoverable(struct hci_dev *hdev) { /* Only queue if it would have any effect */ if (hdev_is_powered(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING) && hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, NULL); return 0; } int hci_update_connectable_sync(struct hci_dev *hdev) { int err; err = hci_update_scan_sync(hdev); if (err) return err; /* If BR/EDR is not enabled and we disable advertising as a * by-product of disabling connectable, we need to update the * advertising flags. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); /* Update the advertising parameters if necessary */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) err = hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); else err = hci_enable_advertising_sync(hdev); if (err) return err; } return hci_update_passive_scan_sync(hdev); } static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) { const u8 giac[3] = { 0x33, 0x8b, 0x9e }; const u8 liac[3] = { 0x00, 0x8b, 0x9e }; struct hci_cp_inquiry cp; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_INQUIRY)) return 0; hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_dev_unlock(hdev); memset(&cp, 0, sizeof(cp)); if (hdev->discovery.limited) memcpy(&cp.lap, liac, sizeof(cp.lap)); else memcpy(&cp.lap, giac, sizeof(cp.lap)); cp.length = length; return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) { u8 own_addr_type; /* Accept list is not used for discovery */ u8 filter_policy = 0x00; /* Default is to enable duplicates filter */ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; int err; bt_dev_dbg(hdev, ""); /* If controller is scanning, it means the passive scanning is * running. Thus, we should temporarily stop it in order to set the * discovery scanning parameters. */ err = hci_scan_disable_sync(hdev); if (err) { bt_dev_err(hdev, "Unable to disable scanning: %d", err); return err; } cancel_interleave_scan(hdev); /* Pause address resolution for active scan and stop advertising if * privacy is enabled. */ err = hci_pause_addr_resolution(hdev); if (err) goto failed; /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable * private address. */ err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), &own_addr_type); if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; if (hci_is_adv_monitoring(hdev)) { /* Duplicate filter should be disabled when some advertisement * monitor is activated, otherwise AdvMon can only receive one * advertisement for one peer(*) during active scanning, and * might report loss to these peers. * * Note that different controllers have different meanings of * |duplicate|. Some of them consider packets with the same * address as duplicate, and others consider packets with the * same address and the same RSSI as duplicate. 
Although in the * latter case we wouldn't need to disable the duplicate filter, * active scanning usually runs only for a short period of * time, so the power impact should be negligible. */ filter_dup = LE_SCAN_FILTER_DUP_DISABLE; } err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, filter_policy, filter_dup); if (!err) return err; failed: /* Resume advertising if it was paused */ if (use_ll_privacy(hdev)) hci_resume_advertising_sync(hdev); /* Resume passive scanning */ hci_update_passive_scan_sync(hdev); return err; } static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) { int err; bt_dev_dbg(hdev, ""); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); if (err) return err; return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); } int hci_start_discovery_sync(struct hci_dev *hdev) { unsigned long timeout; int err; bt_dev_dbg(hdev, "type %u", hdev->discovery.type); switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time since BR/EDR inquiry * and LE scanning are scheduled by the controller. * * For interleaved discovery, in comparison, BR/EDR inquiry * and LE scanning are done sequentially with separate * timeouts. */ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* During simultaneous discovery, we double the LE scan * interval. We must leave some time for the controller * to do BR/EDR inquiry. */ err = hci_start_interleaved_discovery_sync(hdev); break; } timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); break; case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); break; default: return -EINVAL; } if (err) return err; bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); /* When service discovery is used and the controller has a * strict duplicate filter, it is important to remember the * start and duration of the scan. This is required for * restarting scanning during the discovery phase.
*/ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && hdev->discovery.result_filtering) { hdev->discovery.scan_start = jiffies; hdev->discovery.scan_duration = timeout; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); return 0; } static void hci_suspend_monitor_sync(struct hci_dev *hdev) { switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: msft_suspend_sync(hdev); break; default: return; } } /* This function disables discovery and marks it as paused */ static int hci_pause_discovery_sync(struct hci_dev *hdev) { int old_state = hdev->discovery.state; int err; /* If discovery is already stopped/stopping/paused there is nothing to do */ if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || hdev->discovery_paused) return 0; hci_discovery_set_state(hdev, DISCOVERY_STOPPING); err = hci_stop_discovery_sync(hdev); if (err) return err; hdev->discovery_paused = true; hdev->discovery_old_state = old_state; hci_discovery_set_state(hdev, DISCOVERY_STOPPED); return 0; } static int hci_update_event_filter_sync(struct hci_dev *hdev) { struct bdaddr_list_with_flags *b; u8 scan = SCAN_DISABLED; bool scanning = test_bit(HCI_PSCAN, &hdev->flags); int err; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return 0; /* Some fake CSR controllers lock up after setting this type of * filter, so avoid sending the request altogether. */ if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) return 0; /* Always clear event filter when starting */ hci_clear_event_filter_sync(hdev); list_for_each_entry(b, &hdev->accept_list, list) { if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) continue; bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, HCI_CONN_SETUP_ALLOW_BDADDR, &b->bdaddr, HCI_CONN_SETUP_AUTO_ON); if (err) bt_dev_dbg(hdev, "Failed to set event filter for %pMR", &b->bdaddr); else scan = SCAN_PAGE; } if (scan && !scanning) hci_write_scan_enable_sync(hdev, scan); else if (!scan && scanning) hci_write_scan_enable_sync(hdev, scan); return 0; } /* This function disables scan (BR and LE) and marks it as paused */ static int hci_pause_scan_sync(struct hci_dev *hdev) { if (hdev->scanning_paused) return 0; /* Disable page scan if enabled */ if (test_bit(HCI_PSCAN, &hdev->flags)) hci_write_scan_enable_sync(hdev, SCAN_DISABLED); hci_scan_disable_sync(hdev); hdev->scanning_paused = true; return 0; } /* This function performs the HCI suspend procedures in the following order: * * Pause discovery (active scanning/inquiry) * Pause Directed Advertising/Advertising * Pause Scanning (passive scanning in case discovery was not active) * Disconnect all connections * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wake up, * otherwise: * Update event mask (only set events that are allowed to wake up the host) * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) * Update passive scanning (lower duty cycle) * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE */ int hci_suspend_sync(struct hci_dev *hdev) { int err; /* If already marked as suspended there is nothing to do */ if (hdev->suspended) return 0; /* Mark device as suspended */ hdev->suspended = true; /* Pause discovery if not already stopped */ hci_pause_discovery_sync(hdev); /* Pause other advertisements */ hci_pause_advertising_sync(hdev); /* Suspend monitor filters */ hci_suspend_monitor_sync(hdev); /* Prevent disconnects from causing scanning to be re-enabled */ hci_pause_scan_sync(hdev); if
(hci_conn_count(hdev)) { /* Soft disconnect everything (power off) */ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); if (err) { /* Set state to BT_RUNNING so resume doesn't notify */ hdev->suspend_state = BT_RUNNING; hci_resume_sync(hdev); return err; } /* Update event mask so only the allowed events can wake up the * host. */ hci_set_event_mask_sync(hdev); } /* Only configure accept list if disconnect succeeded and wake * isn't being prevented. */ if (!hdev->wakeup || !hdev->wakeup(hdev)) { hdev->suspend_state = BT_SUSPEND_DISCONNECT; return 0; } /* Unpause to take care of updating scanning params */ hdev->scanning_paused = false; /* Enable event filter for paired devices */ hci_update_event_filter_sync(hdev); /* Update LE passive scan if enabled */ hci_update_passive_scan_sync(hdev); /* Pause scan changes again. */ hdev->scanning_paused = true; hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; return 0; } /* This function resumes discovery */ static int hci_resume_discovery_sync(struct hci_dev *hdev) { int err; /* If discovery is not paused there is nothing to do */ if (!hdev->discovery_paused) return 0; hdev->discovery_paused = false; hci_discovery_set_state(hdev, DISCOVERY_STARTING); err = hci_start_discovery_sync(hdev); hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED : DISCOVERY_FINDING); return err; } static void hci_resume_monitor_sync(struct hci_dev *hdev) { switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: msft_resume_sync(hdev); break; default: return; } } /* This function resumes scanning and resets the paused flag */ static int hci_resume_scan_sync(struct hci_dev *hdev) { if (!hdev->scanning_paused) return 0; hdev->scanning_paused = false; hci_update_scan_sync(hdev); /* Reset passive scanning to normal */ hci_update_passive_scan_sync(hdev); return 0; } /* This function performs the HCI resume procedures in the following order: * * Restore event mask * Clear event filter * Update passive scanning (normal duty cycle) * Resume Directed Advertising/Advertising * Resume discovery (active scanning/inquiry) */ int hci_resume_sync(struct hci_dev *hdev) { /* If not marked as suspended there is nothing to do */ if (!hdev->suspended) return 0; hdev->suspended = false; /* Restore event mask */ hci_set_event_mask_sync(hdev); /* Clear any event filters and restore scan state */ hci_clear_event_filter_sync(hdev); /* Resume scanning */ hci_resume_scan_sync(hdev); /* Resume monitor filters */ hci_resume_monitor_sync(hdev); /* Resume other advertisements */ hci_resume_advertising_sync(hdev); /* Resume discovery */ hci_resume_discovery_sync(hdev); return 0; } static bool conn_use_rpa(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; return hci_dev_test_flag(hdev, HCI_PRIVACY); } static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_ext_adv_params cp; int err; bdaddr_t random_addr; u8 own_addr_type; err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (err) return err; /* Set require_privacy to false so that the remote device has a * chance of identifying us.
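* Passing require_privacy as false only means that a non-resolvable * private address is not forced; a resolvable private address is still * used when the HCI_PRIVACY flag is set (see conn_use_rpa() above).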
*/ err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, &own_addr_type, &random_addr); if (err) return err; memset(&cp, 0, sizeof(cp)); cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = HCI_TX_POWER_INVALID; cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; cp.handle = 0x00; /* Use instance 0 for directed adv */ cp.own_addr_type = own_addr_type; cp.peer_addr_type = conn->dst_type; bacpy(&cp.peer_addr, &conn->dst); /* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the * advertising_event_property LE_LEGACY_ADV_DIRECT_IND * does not support advertising data; when the advertising set already * contains some, the controller shall return the error code 'Invalid * HCI Command Parameters' (0x12). * So the adv set for handle 0x00 has to be removed first, since we use * instance 0 for directed adv. */ err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL); if (err) return err; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (err) return err; /* Check if the random address needs to be updated */ if (own_addr_type == ADDR_LE_DEV_RANDOM && bacmp(&random_addr, BDADDR_ANY) && bacmp(&random_addr, &hdev->random_addr)) { err = hci_set_adv_set_random_addr_sync(hdev, 0x00, &random_addr); if (err) return err; } return hci_enable_ext_advertising_sync(hdev, 0x00); } static int hci_le_directed_advertising_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_set_adv_param cp; u8 status; u8 own_addr_type; u8 enable; if (ext_adv_capable(hdev)) return hci_le_ext_directed_advertising_sync(hdev, conn); /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to false so that the remote device has a * chance of identifying us. */ status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (status) return status; memset(&cp, 0, sizeof(cp)); /* Some controllers might reject the command if the intervals are not * within the valid range for undirected advertising. * BCM20702A0 is known to be affected by this.
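* Using 0x0020 (20 ms in 0.625 ms units), the minimum interval the * spec allows for undirected advertising, avoids that problem.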
*/ cp.min_interval = cpu_to_le16(0x0020); cp.max_interval = cpu_to_le16(0x0020); cp.type = LE_ADV_DIRECT_IND; cp.own_address_type = own_addr_type; cp.direct_addr_type = conn->dst_type; bacpy(&cp.direct_addr, &conn->dst); cp.channel_map = hdev->le_adv_channel_map; status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (status) return status; enable = 0x01; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable, HCI_CMD_TIMEOUT); } static void set_ext_conn_params(struct hci_conn *conn, struct hci_cp_le_ext_conn_param *p) { struct hci_dev *hdev = conn->hdev; memset(p, 0, sizeof(*p)); p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_latency = cpu_to_le16(conn->le_conn_latency); p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); p->min_ce_len = cpu_to_le16(0x0000); p->max_ce_len = cpu_to_le16(0x0000); } static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 own_addr_type) { struct hci_cp_le_ext_create_conn *cp; struct hci_cp_le_ext_conn_param *p; u8 data[sizeof(*cp) + sizeof(*p) * 3]; u32 plen; cp = (void *)data; p = (void *)cp->data; memset(cp, 0, sizeof(*cp)); bacpy(&cp->peer_addr, &conn->dst); cp->peer_addr_type = conn->dst_type; cp->own_addr_type = own_addr_type; plen = sizeof(*cp); if (scan_1m(hdev)) { cp->phys |= LE_SCAN_PHY_1M; set_ext_conn_params(conn, p); p++; plen += sizeof(*p); } if (scan_2m(hdev)) { cp->phys |= LE_SCAN_PHY_2M; set_ext_conn_params(conn, p); p++; plen += sizeof(*p); } if (scan_coded(hdev)) { cp->phys |= LE_SCAN_PHY_CODED; set_ext_conn_params(conn, p); plen += sizeof(*p); } return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, plen, data, HCI_EV_LE_ENHANCED_CONN_COMPLETE, conn->conn_timeout, NULL); } int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) { struct hci_cp_le_create_conn cp; struct hci_conn_params *params; u8 own_addr_type; int err; /* If requested to connect as peripheral use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { /* If we're active scanning and simultaneous roles is not * enabled simply reject the attempt. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE && !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { hci_conn_del(conn); return -EBUSY; } /* Pause advertising while doing directed advertising. */ hci_pause_advertising_sync(hdev); err = hci_le_directed_advertising_sync(hdev, conn); goto done; } /* Disable advertising if simultaneous roles is not in use. */ if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) hci_pause_advertising_sync(hdev); params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { conn->le_conn_min_interval = params->conn_min_interval; conn->le_conn_max_interval = params->conn_max_interval; conn->le_conn_latency = params->conn_latency; conn->le_supv_timeout = params->supervision_timeout; } else { conn->le_conn_min_interval = hdev->le_conn_min_interval; conn->le_conn_max_interval = hdev->le_conn_max_interval; conn->le_conn_latency = hdev->le_conn_latency; conn->le_supv_timeout = hdev->le_supv_timeout; } /* If controller is scanning, we stop it since some controllers are * not able to scan and connect at the same time. 
Also set the * HCI_LE_SCAN_INTERRUPTED flag so that the command complete * handler for scan disabling knows to set the correct discovery * state. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_scan_disable_sync(hdev); hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); } /* Update random address, but set require_privacy to false so * that we never connect with an non-resolvable address. */ err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), &own_addr_type); if (err) goto done; if (use_ext_conn(hdev)) { err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); goto done; } memset(&cp, 0, sizeof(cp)); cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; cp.own_address_type = own_addr_type; cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); cp.conn_latency = cpu_to_le16(conn->le_conn_latency); cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: * * If this event is unmasked and the HCI_LE_Connection_Complete event * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is * sent when a new connection has been created. */ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp, use_enhanced_conn_complete(hdev) ? HCI_EV_LE_ENHANCED_CONN_COMPLETE : HCI_EV_LE_CONN_COMPLETE, conn->conn_timeout, NULL); done: if (err == -ETIMEDOUT) hci_le_connect_cancel_sync(hdev, conn, 0x00); /* Re-enable advertising after the connection attempt is finished. */ hci_resume_advertising_sync(hdev); return err; } int hci_le_create_cis_sync(struct hci_dev *hdev) { struct { struct hci_cp_le_create_cis cp; struct hci_cis cis[0x1f]; } cmd; struct hci_conn *conn; u8 cig = BT_ISO_QOS_CIG_UNSET; /* The spec allows only one pending LE Create CIS command at a time. If * the command is pending now, don't do anything. We check for pending * connections after each CIS Established event. * * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2566: * * If the Host issues this command before all the * HCI_LE_CIS_Established events from the previous use of the * command have been generated, the Controller shall return the * error code Command Disallowed (0x0C). * * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E * page 2567: * * When the Controller receives the HCI_LE_Create_CIS command, the * Controller sends the HCI_Command_Status event to the Host. An * HCI_LE_CIS_Established event will be generated for each CIS when it * is established or if it is disconnected or considered lost before * being established; until all the events are generated, the command * remains pending. 
*/ memset(&cmd, 0, sizeof(cmd)); hci_dev_lock(hdev); rcu_read_lock(); /* Wait until previous Create CIS has completed */ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) goto done; } /* Find CIG with all CIS ready */ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { struct hci_conn *link; if (hci_conn_check_create_cis(conn)) continue; cig = conn->iso_qos.ucast.cig; list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { if (hci_conn_check_create_cis(link) > 0 && link->iso_qos.ucast.cig == cig && link->state != BT_CONNECTED) { cig = BT_ISO_QOS_CIG_UNSET; break; } } if (cig != BT_ISO_QOS_CIG_UNSET) break; } if (cig == BT_ISO_QOS_CIG_UNSET) goto done; list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; if (hci_conn_check_create_cis(conn) || conn->iso_qos.ucast.cig != cig) continue; set_bit(HCI_CONN_CREATE_CIS, &conn->flags); cis->acl_handle = cpu_to_le16(conn->parent->handle); cis->cis_handle = cpu_to_le16(conn->handle); cmd.cp.num_cis++; if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis)) break; } done: rcu_read_unlock(); hci_dev_unlock(hdev); if (!cmd.cp.num_cis) return 0; /* Wait for HCI_LE_CIS_Established */ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) + sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd, HCI_EVT_LE_CIS_ESTABLISHED, conn->conn_timeout, NULL); } int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) { struct hci_cp_le_remove_cig cp; memset(&cp, 0, sizeof(cp)); cp.cig_id = handle; return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) { struct hci_cp_le_big_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) { struct hci_cp_le_pa_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(handle); return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, u8 *own_addr_type, bdaddr_t *rand_addr) { int err; bacpy(rand_addr, BDADDR_ANY); /* If privacy is enabled use a resolvable private address. If * current RPA has expired then generate a new one. */ if (use_rpa) { /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (adv_instance) { if (adv_rpa_valid(adv_instance)) return 0; } else { if (rpa_valid(hdev)) return 0; } err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } bacpy(rand_addr, &hdev->rpa); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for * non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. 
*/ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; bacpy(rand_addr, &nrpa); return 0; } /* No privacy so use a public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } static int _update_adv_data_sync(struct hci_dev *hdev, void *data) { u8 instance = PTR_UINT(data); return hci_update_adv_data_sync(hdev, instance); } int hci_update_adv_data(struct hci_dev *hdev, u8 instance) { return hci_cmd_sync_queue(hdev, _update_adv_data_sync, UINT_PTR(instance), NULL); }
linux-master
net/bluetooth/hci_sync.c
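A minimal userspace sketch of the non-resolvable private address rule that hci_get_random_address() applies in the file above: six random bytes with the two most significant bits cleared, regenerated until the result differs from the public address. The bdaddr struct and rand() here are stand-ins for the kernel's bdaddr_t and get_random_bytes(), not the actual kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Illustrative only: b[5] is the most significant byte, as in bdaddr_t. */
typedef struct { unsigned char b[6]; } bdaddr;

static void make_nrpa(bdaddr *nrpa, const bdaddr *public_addr)
{
	do {
		for (int i = 0; i < 6; i++)
			nrpa->b[i] = (unsigned char)(rand() & 0xff);
		nrpa->b[5] &= 0x3f;	/* clear the two top bits of the address */
	} while (!memcmp(nrpa, public_addr, sizeof(*nrpa)));	/* must not equal the public address */
}

int main(void)
{
	bdaddr pub = { { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
	bdaddr nrpa;

	srand((unsigned)time(NULL));
	make_nrpa(&nrpa, &pub);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       nrpa.b[5], nrpa.b[4], nrpa.b[3], nrpa.b[2], nrpa.b[1], nrpa.b[0]);
	return 0;
}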
// SPDX-License-Identifier: GPL-2.0 /* Bluetooth HCI driver model support. */ #include <linux/module.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> static const struct class bt_class = { .name = "bluetooth", }; static void bt_link_release(struct device *dev) { struct hci_conn *conn = to_hci_conn(dev); kfree(conn); } static const struct device_type bt_link = { .name = "link", .release = bt_link_release, }; /* * The rfcomm tty device will possibly retain even when conn * is down, and sysfs doesn't support move zombie device, * so we should move the device before conn device is destroyed. */ static int __match_tty(struct device *dev, void *data) { return !strncmp(dev_name(dev), "rfcomm", 6); } void hci_conn_init_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->dev.type = &bt_link; conn->dev.class = &bt_class; conn->dev.parent = &hdev->dev; device_initialize(&conn->dev); } void hci_conn_add_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); if (device_is_registered(&conn->dev)) return; dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) { bt_dev_err(hdev, "failed to register connection device"); return; } hci_dev_hold(hdev); } void hci_conn_del_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; if (!device_is_registered(&conn->dev)) return; while (1) { struct device *dev; dev = device_find_child(&conn->dev, NULL, __match_tty); if (!dev) break; device_move(dev, NULL, DPM_ORDER_DEV_LAST); put_device(dev); } device_del(&conn->dev); hci_dev_put(hdev); } static void bt_host_release(struct device *dev) { struct hci_dev *hdev = to_hci_dev(dev); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) hci_release_dev(hdev); else kfree(hdev); module_put(THIS_MODULE); } static const struct device_type bt_host = { .name = "host", .release = bt_host_release, }; void hci_init_sysfs(struct hci_dev *hdev) { struct device *dev = &hdev->dev; dev->type = &bt_host; dev->class = &bt_class; __module_get(THIS_MODULE); device_initialize(dev); } int __init bt_sysfs_init(void) { return class_register(&bt_class); } void bt_sysfs_cleanup(void) { class_unregister(&bt_class); }
linux-master
net/bluetooth/hci_sysfs.c
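The sysfs glue above relies on embedding a struct device inside hci_conn/hci_dev and recovering the outer object in the release callback via to_hci_conn()/to_hci_dev(). A small standalone sketch of that container_of pattern, using hypothetical my_conn/fake_device types rather than the real kernel structures:

#include <stddef.h>
#include <stdio.h>

/* Recover the outer object from a pointer to one of its members;
 * a simplified form of the kernel macro, for illustration. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_device { int id; };

struct my_conn {
	int handle;
	struct fake_device dev;		/* embedded, like hci_conn::dev */
};

int main(void)
{
	struct my_conn c = { .handle = 42 };
	struct fake_device *d = &c.dev;
	struct my_conn *back = container_of(d, struct my_conn, dev);

	printf("handle %d\n", back->handle);	/* prints 42 */
	return 0;
}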
// SPDX-License-Identifier: GPL-2.0 /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2021 Intel Corporation */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "eir.h" #define PNP_INFO_SVCLASS_ID 0x1200 static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) { u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; /* If data is already NULL terminated just pass it directly */ if (data[data_len - 1] == '\0') return eir_append_data(eir, eir_len, type, data, data_len); memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH); name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; return eir_append_data(eir, eir_len, type, name, sizeof(name)); } u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { size_t short_len; size_t complete_len; /* no space left for name (+ NULL + type + len) */ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) return ad_len; /* use complete name if present and fits */ complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE, hdev->dev_name, complete_len + 1); /* use short name if present */ short_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); if (short_len) return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, hdev->short_name, short_len == HCI_MAX_SHORT_NAME_LENGTH ? short_len : short_len + 1); /* use shortened full name if present, we already know that name * is longer then HCI_MAX_SHORT_NAME_LENGTH */ if (complete_len) return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); return ad_len; } u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); } u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, u8 data_len) { eir[eir_len++] = sizeof(u8) + sizeof(uuid) + data_len; eir[eir_len++] = EIR_SERVICE_DATA; put_unaligned_le16(uuid, &eir[eir_len]); eir_len += sizeof(uuid); memcpy(&eir[eir_len], data, data_len); eir_len += data_len; return eir_len; } static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 4) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { u16 uuid16; if (uuid->size != 16) continue; uuid16 = get_unaligned_le16(&uuid->uuid[12]); if (uuid16 < 0x1100) continue; if (uuid16 == PNP_INFO_SVCLASS_ID) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID16_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + sizeof(u16) > len) { uuids_start[1] = EIR_UUID16_SOME; break; } *ptr++ = (uuid16 & 0x00ff); *ptr++ = (uuid16 & 0xff00) >> 8; uuids_start[0] += sizeof(uuid16); } return ptr; } static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 6) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { if (uuid->size != 32) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID32_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + sizeof(u32) > len) { uuids_start[1] = EIR_UUID32_SOME; break; } memcpy(ptr, &uuid->uuid[12], sizeof(u32)); ptr += sizeof(u32); uuids_start[0] += sizeof(u32); } return ptr; } static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t 
len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 18) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { if (uuid->size != 128) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID128_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + 16 > len) { uuids_start[1] = EIR_UUID128_SOME; break; } memcpy(ptr, uuid->uuid, 16); ptr += 16; uuids_start[0] += 16; } return ptr; } void eir_create(struct hci_dev *hdev, u8 *data) { u8 *ptr = data; size_t name_len; name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); if (name_len > 0) { /* EIR Data type */ if (name_len > 48) { name_len = 48; ptr[1] = EIR_NAME_SHORT; } else { ptr[1] = EIR_NAME_COMPLETE; } /* EIR Data length */ ptr[0] = name_len + 1; memcpy(ptr + 2, hdev->dev_name, name_len); ptr += (name_len + 2); } if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { ptr[0] = 2; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)hdev->inq_tx_power; ptr += 3; } if (hdev->devid_source > 0) { ptr[0] = 9; ptr[1] = EIR_DEVICE_ID; put_unaligned_le16(hdev->devid_source, ptr + 2); put_unaligned_le16(hdev->devid_vendor, ptr + 4); put_unaligned_le16(hdev->devid_product, ptr + 6); put_unaligned_le16(hdev->devid_version, ptr + 8); ptr += 10; } ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); } u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv = NULL; u8 ad_len = 0; /* Return 0 when the current instance identifier is invalid. */ if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return 0; } if (adv) { memcpy(ptr, adv->per_adv_data, adv->per_adv_data_len); ad_len += adv->per_adv_data_len; ptr += adv->per_adv_data_len; } return ad_len; } u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv = NULL; u8 ad_len = 0, flags = 0; u32 instance_flags; /* Return 0 when the current instance identifier is invalid. */ if (instance) { adv = hci_find_adv_instance(hdev, instance); if (!adv) return 0; } instance_flags = hci_adv_instance_flags(hdev, instance); /* If instance already has the flags set skip adding it once * again. */ if (adv && eir_get_data(adv->adv_data, adv->adv_data_len, EIR_FLAGS, NULL)) goto skip_flags; /* The Add Advertising command allows userspace to set both the general * and limited discoverable flags. */ if (instance_flags & MGMT_ADV_FLAG_DISCOV) flags |= LE_AD_GENERAL; if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) flags |= LE_AD_LIMITED; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { /* If a discovery flag wasn't provided, simply use the global * settings. */ if (!flags) flags |= mgmt_get_adv_discov_flags(hdev); /* If flags would still be empty, then there is no need to * include the "Flags" AD field". 
*/ if (flags) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; ad_len += 3; ptr += 3; } } skip_flags: if (adv) { memcpy(ptr, adv->adv_data, adv->adv_data_len); ad_len += adv->adv_data_len; ptr += adv->adv_data_len; } if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { s8 adv_tx_power; if (ext_adv_capable(hdev)) { if (adv) adv_tx_power = adv->tx_power; else adv_tx_power = hdev->adv_tx_power; } else { adv_tx_power = hdev->adv_tx_power; } /* Provide Tx Power only if we can provide a valid value for it */ if (adv_tx_power != HCI_TX_POWER_INVALID) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; ad_len += 3; ptr += 3; } } return ad_len; } static u8 create_default_scan_rsp(struct hci_dev *hdev, u8 *ptr) { u8 scan_rsp_len = 0; if (hdev->appearance) scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); return eir_append_local_name(hdev, ptr, scan_rsp_len); } u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv; u8 scan_rsp_len = 0; if (!instance) return create_default_scan_rsp(hdev, ptr); adv = hci_find_adv_instance(hdev, instance); if (!adv) return 0; if ((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); memcpy(&ptr[scan_rsp_len], adv->scan_rsp_data, adv->scan_rsp_len); scan_rsp_len += adv->scan_rsp_len; if (adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) scan_rsp_len = eir_append_local_name(hdev, ptr, scan_rsp_len); return scan_rsp_len; } void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len) { while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, len))) { u16 value = get_unaligned_le16(eir); if (uuid == value) { if (len) *len -= 2; return &eir[2]; } eir += *len; eir_len -= *len; } return NULL; }
linux-master
net/bluetooth/eir.c
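eir.c builds and scans advertising/EIR payloads as length-type-value structures, where each length byte covers the type byte plus its data. A self-contained sketch of that layout, assuming the standard Flags AD type value 0x01 and an illustrative flags byte; the helper names are made up for the example and are not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AD_TYPE_FLAGS	0x01	/* standard "Flags" AD type */
#define LE_AD_GENERAL	0x02	/* general discoverable bit */

/* Append one <len><type><data...> structure, returning the new offset. */
static size_t ad_append(uint8_t *buf, size_t off, uint8_t type,
			const uint8_t *data, uint8_t len)
{
	buf[off++] = len + 1;		/* length counts type byte + payload */
	buf[off++] = type;
	memcpy(buf + off, data, len);
	return off + len;
}

/* Walk the structures and return a pointer to the payload of `type`. */
static const uint8_t *ad_find(const uint8_t *buf, size_t buf_len,
			      uint8_t type, uint8_t *out_len)
{
	size_t off = 0;

	while (off + 1 < buf_len && buf[off]) {
		uint8_t field_len = buf[off];

		if (buf[off + 1] == type) {
			*out_len = field_len - 1;
			return buf + off + 2;
		}
		off += field_len + 1;	/* skip length byte + field */
	}
	return NULL;
}

int main(void)
{
	uint8_t adv[31] = { 0 };	/* legacy advertising payload limit */
	uint8_t flags = LE_AD_GENERAL, found_len;
	size_t len = ad_append(adv, 0, AD_TYPE_FLAGS, &flags, 1);
	const uint8_t *p = ad_find(adv, len, AD_TYPE_FLAGS, &found_len);

	if (p)
		printf("flags field: len %u value 0x%02x\n",
		       (unsigned)found_len, p[0]);
	return 0;
}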
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth address family and sockets. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/stringify.h> #include <linux/sched/signal.h> #include <asm/ioctls.h> #include <net/bluetooth/bluetooth.h> #include <linux/proc_fs.h> #include "leds.h" #include "selftest.h" /* Bluetooth sockets */ #define BT_MAX_PROTO (BTPROTO_LAST + 1) static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; static DEFINE_RWLOCK(bt_proto_lock); static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; static const char *const bt_key_strings[BT_MAX_PROTO] = { "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", "sk_lock-AF_BLUETOOTH-BTPROTO_HCI", "sk_lock-AF_BLUETOOTH-BTPROTO_SCO", "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM", "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP", "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", }; static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { "slock-AF_BLUETOOTH-BTPROTO_L2CAP", "slock-AF_BLUETOOTH-BTPROTO_HCI", "slock-AF_BLUETOOTH-BTPROTO_SCO", "slock-AF_BLUETOOTH-BTPROTO_RFCOMM", "slock-AF_BLUETOOTH-BTPROTO_BNEP", "slock-AF_BLUETOOTH-BTPROTO_CMTP", "slock-AF_BLUETOOTH-BTPROTO_HIDP", "slock-AF_BLUETOOTH-BTPROTO_AVDTP", "slock-AF_BLUETOOTH-BTPROTO_ISO", }; void bt_sock_reclassify_lock(struct sock *sk, int proto) { BUG_ON(!sk); BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, bt_slock_key_strings[proto], &bt_slock_key[proto], bt_key_strings[proto], &bt_lock_key[proto]); } EXPORT_SYMBOL(bt_sock_reclassify_lock); int bt_sock_register(int proto, const struct net_proto_family *ops) { int err = 0; if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; write_lock(&bt_proto_lock); if (bt_proto[proto]) err = -EEXIST; else bt_proto[proto] = ops; write_unlock(&bt_proto_lock); return err; } EXPORT_SYMBOL(bt_sock_register); void bt_sock_unregister(int proto) { if (proto < 0 || proto >= BT_MAX_PROTO) return; write_lock(&bt_proto_lock); bt_proto[proto] = NULL; write_unlock(&bt_proto_lock); } EXPORT_SYMBOL(bt_sock_unregister); static int bt_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err; if (net != &init_net) return -EAFNOSUPPORT; if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; if (!bt_proto[proto]) request_module("bt-proto-%d", proto); err = -EPROTONOSUPPORT; read_lock(&bt_proto_lock); if (bt_proto[proto] && 
try_module_get(bt_proto[proto]->owner)) { err = bt_proto[proto]->create(net, sock, proto, kern); if (!err) bt_sock_reclassify_lock(sock->sk, proto); module_put(bt_proto[proto]->owner); } read_unlock(&bt_proto_lock); return err; } struct sock *bt_sock_alloc(struct net *net, struct socket *sock, struct proto *prot, int proto, gfp_t prio, int kern) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; /* Init peer information so it can be properly monitored */ if (!kern) { spin_lock(&sk->sk_peer_lock); sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); spin_unlock(&sk->sk_peer_lock); } return sk; } EXPORT_SYMBOL(bt_sock_alloc); void bt_sock_link(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_link); void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_unlink); void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) { const struct cred *old_cred; struct pid *old_pid; BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); if (bh) bh_lock_sock_nested(sk); else lock_sock_nested(sk, SINGLE_DEPTH_NESTING); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; /* Copy credentials from parent since for incoming connections the * socket is allocated by the kernel. */ spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = sk->sk_peer_cred; sk->sk_peer_pid = get_pid(parent->sk_peer_pid); sk->sk_peer_cred = get_cred(parent->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); put_pid(old_pid); put_cred(old_cred); if (bh) bh_unlock_sock(sk); else release_sock(sk); sk_acceptq_added(parent); } EXPORT_SYMBOL(bt_accept_enqueue); /* Calling function must hold the sk lock. * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. */ void bt_accept_unlink(struct sock *sk) { BT_DBG("sk %p state %d", sk, sk->sk_state); list_del_init(&bt_sk(sk)->accept_q); sk_acceptq_removed(bt_sk(sk)->parent); bt_sk(sk)->parent = NULL; sock_put(sk); } EXPORT_SYMBOL(bt_accept_unlink); struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) { struct bt_sock *s, *n; struct sock *sk; BT_DBG("parent %p", parent); restart: list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; /* Prevent early freeing of sk due to unlink and sock_kill */ sock_hold(sk); lock_sock(sk); /* Check sk has not already been unlinked via * bt_accept_unlink() due to serialisation caused by sk locking */ if (!bt_sk(sk)->parent) { BT_DBG("sk %p, already unlinked", sk); release_sock(sk); sock_put(sk); /* Restart the loop as sk is no longer in the list * and also avoid a potential infinite loop because * list_for_each_entry_safe() is not thread safe. 
*/ goto restart; } /* sk is safely in the parent list so reduce reference count */ sock_put(sk); /* FIXME: Is this check still needed */ if (sk->sk_state == BT_CLOSED) { bt_accept_unlink(sk); release_sock(sk); continue; } if (sk->sk_state == BT_CONNECTED || !newsock || test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { bt_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); return sk; } release_sock(sk); } return NULL; } EXPORT_SYMBOL(bt_accept_dequeue); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; size_t skblen; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & MSG_OOB) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } skblen = skb->len; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { sock_recv_cmsgs(msg, sk, skb); if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) { u8 pkt_status = hci_skb_pkt_status(skb); put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, sizeof(pkt_status), &pkt_status); } } skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; return err ? : copied; } EXPORT_SYMBOL(bt_sock_recvmsg); static long bt_sock_data_wait(struct sock *sk, long timeo) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(sk_sleep(sk), &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!skb_queue_empty(&sk->sk_receive_queue)) break; if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN)) break; if (signal_pending(current) || !timeo) break; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return timeo; } int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int err = 0; size_t target, copied = 0; long timeo; if (flags & MSG_OOB) return -EOPNOTSUPP; BT_DBG("sk %p size %zu", sk, size); lock_sock(sk); target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { struct sk_buff *skb; int chunk; skb = skb_dequeue(&sk->sk_receive_queue); if (!skb) { if (copied >= target) break; err = sock_error(sk); if (err) break; if (sk->sk_shutdown & RCV_SHUTDOWN) break; err = -EAGAIN; if (!timeo) break; timeo = bt_sock_data_wait(sk, timeo); if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } continue; } chunk = min_t(unsigned int, skb->len, size); if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (!copied) copied = -EFAULT; break; } copied += chunk; size -= chunk; sock_recv_cmsgs(msg, sk, skb); if (!(flags & MSG_PEEK)) { int skb_len = skb_headlen(skb); if (chunk <= skb_len) { __skb_pull(skb, chunk); } else { struct sk_buff *frag; __skb_pull(skb, skb_len); chunk -= skb_len; skb_walk_frags(skb, frag) { if (chunk <= frag->len) { /* Pulling partial data */ skb->len -= chunk; skb->data_len -= chunk; __skb_pull(frag, chunk); break; } else if (frag->len) { /* Pulling all frag data */ chunk -= frag->len; skb->len -= frag->len; skb->data_len -= frag->len; __skb_pull(frag, frag->len); } } } if 
(skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); out: release_sock(sk); return copied ? : err; } EXPORT_SYMBOL(bt_sock_stream_recvmsg); static inline __poll_t bt_accept_poll(struct sock *parent) { struct bt_sock *s, *n; struct sock *sk; list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; if (sk->sk_state == BT_CONNECTED || (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && sk->sk_state == BT_CONNECT2)) return EPOLLIN | EPOLLRDNORM; } return 0; } __poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == BT_LISTEN) return bt_accept_poll(sk); if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == BT_CLOSED) mask |= EPOLLHUP; if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2 || sk->sk_state == BT_CONFIG) return mask; if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } EXPORT_SYMBOL(bt_sock_poll); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct sk_buff *skb; long amount; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); switch (cmd) { case TIOCOUTQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *)arg); break; case TIOCINQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; lock_sock(sk); skb = skb_peek(&sk->sk_receive_queue); amount = skb ? 
skb->len : 0; release_sock(sk); err = put_user(amount, (int __user *)arg); break; default: err = -ENOIOCTLCMD; break; } return err; } EXPORT_SYMBOL(bt_sock_ioctl); /* This function expects the sk lock to be held when called */ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; BT_DBG("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_state); /* This function expects the sk lock to be held when called */ int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags) { DECLARE_WAITQUEUE(wait, current); unsigned long timeo; int err = 0; BT_DBG("sk %p", sk); timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT)); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_ready); #ifdef CONFIG_PROC_FS static void *bt_seq_start(struct seq_file *seq, loff_t *pos) __acquires(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_lock(&l->lock); return seq_hlist_start_head(&l->head, *pos); } static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); return seq_hlist_next(v, &l->head, pos); } static void bt_seq_stop(struct seq_file *seq, void *v) __releases(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_unlock(&l->lock); } static int bt_seq_show(struct seq_file *seq, void *v) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent"); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } else { struct sock *sk = sk_entry(v); struct bt_sock *bt = bt_sk(sk); seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu", sk, refcount_read(&sk->sk_refcnt), sk_rmem_alloc_get(sk), sk_wmem_alloc_get(sk), from_kuid(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk), bt->parent ? 
sock_i_ino(bt->parent) : 0LU); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } return 0; } static const struct seq_operations bt_seq_ops = { .start = bt_seq_start, .next = bt_seq_next, .stop = bt_seq_stop, .show = bt_seq_show, }; int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { sk_list->custom_seq_show = seq_show; if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list)) return -ENOMEM; return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { remove_proc_entry(name, net->proc_net); } #else int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { } #endif EXPORT_SYMBOL(bt_procfs_init); EXPORT_SYMBOL(bt_procfs_cleanup); static const struct net_proto_family bt_sock_family_ops = { .owner = THIS_MODULE, .family = PF_BLUETOOTH, .create = bt_sock_create, }; struct dentry *bt_debugfs; EXPORT_SYMBOL_GPL(bt_debugfs); #define VERSION __stringify(BT_SUBSYS_VERSION) "." \ __stringify(BT_SUBSYS_REVISION) static int __init bt_init(void) { int err; sock_skb_cb_check_size(sizeof(struct bt_skb_cb)); BT_INFO("Core ver %s", VERSION); err = bt_selftest(); if (err < 0) return err; bt_debugfs = debugfs_create_dir("bluetooth", NULL); bt_leds_init(); err = bt_sysfs_init(); if (err < 0) goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) goto cleanup_sysfs; BT_INFO("HCI device and connection manager initialized"); err = hci_sock_init(); if (err) goto unregister_socket; err = l2cap_init(); if (err) goto cleanup_socket; err = sco_init(); if (err) goto cleanup_cap; err = mgmt_init(); if (err) goto cleanup_sco; return 0; cleanup_sco: sco_exit(); cleanup_cap: l2cap_exit(); cleanup_socket: hci_sock_cleanup(); unregister_socket: sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); cleanup_led: bt_leds_cleanup(); return err; } static void __exit bt_exit(void) { mgmt_exit(); sco_exit(); l2cap_exit(); hci_sock_cleanup(); sock_unregister(PF_BLUETOOTH); bt_sysfs_cleanup(); bt_leds_cleanup(); debugfs_remove_recursive(bt_debugfs); } subsys_initcall(bt_init); module_exit(bt_exit); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
linux-master
net/bluetooth/af_bluetooth.c
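bt_sock_create() above is what ultimately backs a userspace socket(AF_BLUETOOTH, ...) call. A minimal sketch of opening a raw HCI socket; the constants are defined locally only so the example builds without the BlueZ headers (AF_BLUETOOTH is 31 and BTPROTO_HCI is 1 on Linux, but <bluetooth/bluetooth.h> should be treated as authoritative):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH	31
#endif
#define BTPROTO_HCI	1

int main(void)
{
	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (fd < 0) {
		/* Typically fails if the bluetooth modules are not loaded
		 * or the process lacks the needed privileges. */
		fprintf(stderr, "socket: %s\n", strerror(errno));
		return 1;
	}
	printf("raw HCI socket opened, fd %d\n", fd);
	close(fd);
	return 0;
}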
/* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI connection handling. */ #include <linux/export.h> #include <linux/debugfs.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/iso.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "smp.h" #include "a2mp.h" #include "eir.h" struct sco_param { u16 pkt_type; u16 max_latency; u8 retrans_effort; }; struct conn_handle_t { struct hci_conn *conn; __u16 handle; }; static const struct sco_param esco_param_cvsd[] = { { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */ { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */ { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */ { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */ }; static const struct sco_param sco_param_cvsd[] = { { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */ { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */ }; static const struct sco_param esco_param_msbc[] = { { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */ { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */ }; /* This function requires the caller holds hdev->lock */ static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) { struct hci_conn_params *params; struct hci_dev *hdev = conn->hdev; struct smp_irk *irk; bdaddr_t *bdaddr; u8 bdaddr_type; bdaddr = &conn->dst; bdaddr_type = conn->dst_type; /* Check if we need to convert to identity address */ irk = hci_get_irk(hdev, bdaddr, bdaddr_type); if (irk) { bdaddr = &irk->bdaddr; bdaddr_type = irk->addr_type; } params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr, bdaddr_type); if (!params) return; if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); params->conn = NULL; } if (!params->explicit_connect) return; /* If the status indicates successful cancellation of * the attempt (i.e. Unknown Connection Id) there's no point of * notifying failure since we'll go back to keep trying to * connect. The only exception is explicit connect requests * where a timeout + cancel does indicate an actual failure. */ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); /* The connection attempt was doing scan for new RPA, and is * in scan phase. If params are not associated with any other * autoconnect action, remove them completely. 
If they are, just unmark * them as waiting for connection, by clearing explicit_connect field. */ params->explicit_connect = false; hci_pend_le_list_del_init(params); switch (params->auto_connect) { case HCI_AUTO_CONN_EXPLICIT: hci_conn_params_del(hdev, bdaddr, bdaddr_type); /* return instead of break to avoid duplicate scan update */ return; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_add(params, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: hci_pend_le_list_add(params, &hdev->pend_le_reports); break; default: break; } hci_update_passive_scan(hdev); } static void hci_conn_cleanup(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); hci_chan_list_flush(conn); hci_conn_hash_del(hdev, conn); if (conn->cleanup) conn->cleanup(conn); if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_CVSD: case SCO_AIRMODE_TRANSP: if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO); break; } } else { if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); } hci_conn_del_sysfs(conn); debugfs_remove_recursive(conn->debugfs); hci_dev_put(hdev); hci_conn_put(conn); } static void hci_acl_create_connection(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct inquiry_entry *ie; struct hci_cp_create_conn cp; BT_DBG("hcon %p", conn); /* Many controllers disallow HCI Create Connection while it is doing * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create * Connection. This may cause the MGMT discovering state to become false * without user space's request but it is okay since the MGMT Discovery * APIs do not promise that discovery should be done forever. Instead, * the user space monitors the status of MGMT discovering and it may * request for discovery again when this flag becomes false. */ if (test_bit(HCI_INQUIRY, &hdev->flags)) { /* Put this connection to "pending" state so that it will be * executed after the inquiry cancel command complete event. */ conn->state = BT_CONNECT2; hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); return; } conn->state = BT_CONNECT; conn->out = true; conn->role = HCI_ROLE_MASTER; conn->attempt++; conn->link_policy = hdev->link_policy; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { cp.pscan_rep_mode = ie->data.pscan_rep_mode; cp.pscan_mode = ie->data.pscan_mode; cp.clock_offset = ie->data.clock_offset | cpu_to_le16(0x8000); } memcpy(conn->dev_class, ie->data.dev_class, 3); } cp.pkt_type = cpu_to_le16(conn->pkt_type); if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) cp.role_switch = 0x01; else cp.role_switch = 0x00; hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); } int hci_disconnect(struct hci_conn *conn, __u8 reason) { BT_DBG("hcon %p", conn); /* When we are central of an established connection and it enters * the disconnect timeout, then go ahead and try to read the * current clock offset. Processing of the result is done * within the event handling and hci_clock_offset_evt function. 
*/ if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER && (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) { struct hci_dev *hdev = conn->hdev; struct hci_cp_read_clock_offset clkoff_cp; clkoff_cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp), &clkoff_cp); } return hci_abort_conn(conn, reason); } static void hci_add_sco(struct hci_conn *conn, __u16 handle) { struct hci_dev *hdev = conn->hdev; struct hci_cp_add_sco cp; BT_DBG("hcon %p", conn); conn->state = BT_CONNECT; conn->out = true; conn->attempt++; cp.handle = cpu_to_le16(handle); cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); } static bool find_next_esco_param(struct hci_conn *conn, const struct sco_param *esco_param, int size) { if (!conn->parent) return false; for (; conn->attempt <= size; conn->attempt++) { if (lmp_esco_2m_capable(conn->parent) || (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) break; BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", conn, conn->attempt); } return conn->attempt <= size; } static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec) { int err; __u8 vnd_len, *vnd_data = NULL; struct hci_op_configure_data_path *cmd = NULL; err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, &vnd_data); if (err < 0) goto error; cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); if (!cmd) { err = -ENOMEM; goto error; } err = hdev->get_data_path_id(hdev, &cmd->data_path_id); if (err < 0) goto error; cmd->vnd_len = vnd_len; memcpy(cmd->vnd_data, vnd_data, vnd_len); cmd->direction = 0x00; __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT); cmd->direction = 0x01; err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT); error: kfree(cmd); kfree(vnd_data); return err; } static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) { struct conn_handle_t *conn_handle = data; struct hci_conn *conn = conn_handle->conn; __u16 handle = conn_handle->handle; struct hci_cp_enhanced_setup_sync_conn cp; const struct sco_param *param; kfree(conn_handle); bt_dev_dbg(hdev, "hcon %p", conn); /* for offload use case, codec needs to configured before opening SCO */ if (conn->codec.data_path) configure_datapath_sync(hdev, &conn->codec); conn->state = BT_CONNECT; conn->out = true; conn->attempt++; memset(&cp, 0x00, sizeof(cp)); cp.handle = cpu_to_le16(handle); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); switch (conn->codec.id) { case BT_CODEC_MSBC: if (!find_next_esco_param(conn, esco_param_msbc, ARRAY_SIZE(esco_param_msbc))) return -EINVAL; param = &esco_param_msbc[conn->attempt - 1]; cp.tx_coding_format.id = 0x05; cp.rx_coding_format.id = 0x05; cp.tx_codec_frame_size = __cpu_to_le16(60); cp.rx_codec_frame_size = __cpu_to_le16(60); cp.in_bandwidth = __cpu_to_le32(32000); cp.out_bandwidth = __cpu_to_le32(32000); cp.in_coding_format.id = 0x04; cp.out_coding_format.id = 0x04; cp.in_coded_data_size = __cpu_to_le16(16); cp.out_coded_data_size = __cpu_to_le16(16); cp.in_pcm_data_format = 2; cp.out_pcm_data_format = 2; cp.in_pcm_sample_payload_msb_pos = 0; cp.out_pcm_sample_payload_msb_pos = 0; cp.in_data_path = conn->codec.data_path; cp.out_data_path = conn->codec.data_path; cp.in_transport_unit_size = 1; cp.out_transport_unit_size = 1; break; case BT_CODEC_TRANSPARENT: if (!find_next_esco_param(conn, esco_param_msbc, 
ARRAY_SIZE(esco_param_msbc))) return false; param = &esco_param_msbc[conn->attempt - 1]; cp.tx_coding_format.id = 0x03; cp.rx_coding_format.id = 0x03; cp.tx_codec_frame_size = __cpu_to_le16(60); cp.rx_codec_frame_size = __cpu_to_le16(60); cp.in_bandwidth = __cpu_to_le32(0x1f40); cp.out_bandwidth = __cpu_to_le32(0x1f40); cp.in_coding_format.id = 0x03; cp.out_coding_format.id = 0x03; cp.in_coded_data_size = __cpu_to_le16(16); cp.out_coded_data_size = __cpu_to_le16(16); cp.in_pcm_data_format = 2; cp.out_pcm_data_format = 2; cp.in_pcm_sample_payload_msb_pos = 0; cp.out_pcm_sample_payload_msb_pos = 0; cp.in_data_path = conn->codec.data_path; cp.out_data_path = conn->codec.data_path; cp.in_transport_unit_size = 1; cp.out_transport_unit_size = 1; break; case BT_CODEC_CVSD: if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) return -EINVAL; param = &esco_param_cvsd[conn->attempt - 1]; } else { if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) return -EINVAL; param = &sco_param_cvsd[conn->attempt - 1]; } cp.tx_coding_format.id = 2; cp.rx_coding_format.id = 2; cp.tx_codec_frame_size = __cpu_to_le16(60); cp.rx_codec_frame_size = __cpu_to_le16(60); cp.in_bandwidth = __cpu_to_le32(16000); cp.out_bandwidth = __cpu_to_le32(16000); cp.in_coding_format.id = 4; cp.out_coding_format.id = 4; cp.in_coded_data_size = __cpu_to_le16(16); cp.out_coded_data_size = __cpu_to_le16(16); cp.in_pcm_data_format = 2; cp.out_pcm_data_format = 2; cp.in_pcm_sample_payload_msb_pos = 0; cp.out_pcm_sample_payload_msb_pos = 0; cp.in_data_path = conn->codec.data_path; cp.out_data_path = conn->codec.data_path; cp.in_transport_unit_size = 16; cp.out_transport_unit_size = 16; break; default: return -EINVAL; } cp.retrans_effort = param->retrans_effort; cp.pkt_type = __cpu_to_le16(param->pkt_type); cp.max_latency = __cpu_to_le16(param->max_latency); if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) return -EIO; return 0; } static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) { struct hci_dev *hdev = conn->hdev; struct hci_cp_setup_sync_conn cp; const struct sco_param *param; bt_dev_dbg(hdev, "hcon %p", conn); conn->state = BT_CONNECT; conn->out = true; conn->attempt++; cp.handle = cpu_to_le16(handle); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.voice_setting = cpu_to_le16(conn->setting); switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: if (!find_next_esco_param(conn, esco_param_msbc, ARRAY_SIZE(esco_param_msbc))) return false; param = &esco_param_msbc[conn->attempt - 1]; break; case SCO_AIRMODE_CVSD: if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) return false; param = &esco_param_cvsd[conn->attempt - 1]; } else { if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) return false; param = &sco_param_cvsd[conn->attempt - 1]; } break; default: return false; } cp.retrans_effort = param->retrans_effort; cp.pkt_type = __cpu_to_le16(param->pkt_type); cp.max_latency = __cpu_to_le16(param->max_latency); if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) return false; return true; } bool hci_setup_sync(struct hci_conn *conn, __u16 handle) { int result; struct conn_handle_t *conn_handle; if (enhanced_sync_conn_capable(conn->hdev)) { conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL); if (!conn_handle) return false; conn_handle->conn = conn; conn_handle->handle 
= handle; result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync, conn_handle, NULL); if (result < 0) kfree(conn_handle); return result == 0; } return hci_setup_sync_conn(conn, handle); } u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier) { struct hci_dev *hdev = conn->hdev; struct hci_conn_params *params; struct hci_cp_le_conn_update cp; hci_dev_lock(hdev); params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { params->conn_min_interval = min; params->conn_max_interval = max; params->conn_latency = latency; params->supervision_timeout = to_multiplier; } hci_dev_unlock(hdev); memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.conn_interval_min = cpu_to_le16(min); cp.conn_interval_max = cpu_to_le16(max); cp.conn_latency = cpu_to_le16(latency); cp.supervision_timeout = cpu_to_le16(to_multiplier); cp.min_ce_len = cpu_to_le16(0x0000); cp.max_ce_len = cpu_to_le16(0x0000); hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); if (params) return 0x01; return 0x00; } void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, __u8 ltk[16], __u8 key_size) { struct hci_dev *hdev = conn->hdev; struct hci_cp_le_start_enc cp; BT_DBG("hcon %p", conn); memset(&cp, 0, sizeof(cp)); cp.handle = cpu_to_le16(conn->handle); cp.rand = rand; cp.ediv = ediv; memcpy(cp.ltk, ltk, key_size); hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); } /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) { struct hci_link *link; link = list_first_entry_or_null(&conn->link_list, struct hci_link, list); if (!link || !link->conn) return; BT_DBG("hcon %p", conn); if (!status) { if (lmp_esco_capable(conn->hdev)) hci_setup_sync(link->conn, conn->handle); else hci_add_sco(link->conn, conn->handle); } else { hci_connect_cfm(link->conn, status); hci_conn_del(link->conn); } } static void hci_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, disc_work.work); int refcnt = atomic_read(&conn->refcnt); BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); WARN_ON(refcnt < 0); /* FIXME: It was observed that in pairing failed scenario, refcnt * drops below 0. Probably this is because l2cap_conn_del calls * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is * dropped. After that loop hci_chan_del is called which also drops * conn. For now make sure that ACL is alive if refcnt is higher then 0, * otherwise drop it. 
*/ if (refcnt > 0) return; hci_abort_conn(conn, hci_proto_disconn_ind(conn)); } /* Enter sniff mode */ static void hci_conn_idle(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, idle_work.work); struct hci_dev *hdev = conn->hdev; BT_DBG("hcon %p mode %d", conn, conn->mode); if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) return; if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) return; if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { struct hci_cp_sniff_subrate cp; cp.handle = cpu_to_le16(conn->handle); cp.max_latency = cpu_to_le16(0); cp.min_remote_timeout = cpu_to_le16(0); cp.min_local_timeout = cpu_to_le16(0); hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); } if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { struct hci_cp_sniff_mode cp; cp.handle = cpu_to_le16(conn->handle); cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); cp.attempt = cpu_to_le16(4); cp.timeout = cpu_to_le16(1); hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); } } static void hci_conn_auto_accept(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, auto_accept_work.work); hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), &conn->dst); } static void le_disable_advertising(struct hci_dev *hdev) { if (ext_adv_capable(hdev)) { struct hci_cp_le_set_ext_adv_enable cp; cp.enable = 0x00; cp.num_of_sets = 0x00; hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp); } else { u8 enable = 0x00; hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); } } static void le_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, le_conn_timeout.work); struct hci_dev *hdev = conn->hdev; BT_DBG(""); /* We could end up here due to having done directed advertising, * so clean up the state if necessary. This should however only * happen with broken hardware or if low duty cycle was used * (which doesn't have a timeout of its own). 
*/ if (conn->role == HCI_ROLE_SLAVE) { /* Disable LE Advertising */ le_disable_advertising(hdev); hci_dev_lock(hdev); hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); hci_dev_unlock(hdev); return; } hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); } struct iso_cig_params { struct hci_cp_le_set_cig_params cp; struct hci_cis_params cis[0x1f]; }; struct iso_list_data { union { u8 cig; u8 big; }; union { u8 cis; u8 bis; u16 sync_handle; }; int count; bool big_term; bool pa_sync_term; bool big_sync_term; }; static void bis_list(struct hci_conn *conn, void *data) { struct iso_list_data *d = data; /* Skip if not broadcast/ANY address */ if (bacmp(&conn->dst, BDADDR_ANY)) return; if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || d->bis != conn->iso_qos.bcast.bis) return; d->count++; } static int terminate_big_sync(struct hci_dev *hdev, void *data) { struct iso_list_data *d = data; bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis); hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL); /* Only terminate BIG if it has been created */ if (!d->big_term) return 0; return hci_le_terminate_big_sync(hdev, d->big, HCI_ERROR_LOCAL_HOST_TERM); } static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err) { kfree(data); } static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn) { struct iso_list_data *d; int ret; bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big, conn->iso_qos.bcast.bis); d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; d->big = conn->iso_qos.bcast.big; d->bis = conn->iso_qos.bcast.bis; d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags); ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d, terminate_big_destroy); if (ret) kfree(d); return ret; } static int big_terminate_sync(struct hci_dev *hdev, void *data) { struct iso_list_data *d = data; bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big, d->sync_handle); if (d->big_sync_term) hci_le_big_terminate_sync(hdev, d->big); if (d->pa_sync_term) return hci_le_pa_terminate_sync(hdev, d->sync_handle); return 0; } static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn) { struct iso_list_data *d; int ret; bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle); d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; d->big = big; d->sync_handle = conn->sync_handle; d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags); d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags); ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, terminate_big_destroy); if (ret) kfree(d); return ret; } /* Cleanup BIS connection * * Detects if there any BIS left connected in a BIG * broadcaster: Remove advertising instance and terminate BIG. * broadcaster receiver: Teminate BIG sync and terminate PA sync. */ static void bis_cleanup(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct hci_conn *bis; bt_dev_dbg(hdev, "conn %p", conn); if (conn->role == HCI_ROLE_MASTER) { if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) return; /* Check if ISO connection is a BIS and terminate advertising * set and BIG if there are no other connections using it. 
*/ bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big); if (bis) return; hci_le_terminate_big(hdev, conn); } else { bis = hci_conn_hash_lookup_big_any_dst(hdev, conn->iso_qos.bcast.big); if (bis) return; hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, conn); } } static int remove_cig_sync(struct hci_dev *hdev, void *data) { u8 handle = PTR_UINT(data); return hci_le_remove_cig_sync(hdev, handle); } static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle) { bt_dev_dbg(hdev, "handle 0x%2.2x", handle); return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle), NULL); } static void find_cis(struct hci_conn *conn, void *data) { struct iso_list_data *d = data; /* Ignore broadcast or if CIG don't match */ if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) return; d->count++; } /* Cleanup CIS connection: * * Detects if there any CIS left connected in a CIG and remove it. */ static void cis_cleanup(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct iso_list_data d; if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) return; memset(&d, 0, sizeof(d)); d.cig = conn->iso_qos.ucast.cig; /* Check if ISO connection is a CIS and remove CIG if there are * no other connections using it. */ hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); if (d.count) return; hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); } static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; u16 handle = HCI_CONN_HANDLE_MAX + 1; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { /* Find the first unused handle */ if (handle == 0xffff || c->handle != handle) break; handle++; } rcu_read_unlock(); return handle; } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role) { struct hci_conn *conn; BT_DBG("%s dst %pMR", hdev->name, dst); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) return NULL; bacpy(&conn->dst, dst); bacpy(&conn->src, &hdev->bdaddr); conn->handle = hci_conn_hash_alloc_unset(hdev); conn->hdev = hdev; conn->type = type; conn->role = role; conn->mode = HCI_CM_ACTIVE; conn->state = BT_OPEN; conn->auth_type = HCI_AT_GENERAL_BONDING; conn->io_capability = hdev->io_capability; conn->remote_auth = 0xff; conn->key_type = 0xff; conn->rssi = HCI_RSSI_INVALID; conn->tx_power = HCI_TX_POWER_INVALID; conn->max_tx_power = HCI_TX_POWER_INVALID; set_bit(HCI_CONN_POWER_SAVE, &conn->flags); conn->disc_timeout = HCI_DISCONN_TIMEOUT; /* Set Default Authenticated payload timeout to 30s */ conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; if (conn->role == HCI_ROLE_MASTER) conn->out = true; switch (type) { case ACL_LINK: conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; break; case LE_LINK: /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); break; case ISO_LINK: /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); /* set proper cleanup function */ if (!bacmp(dst, BDADDR_ANY)) conn->cleanup = bis_cleanup; else if (conn->role == HCI_ROLE_MASTER) conn->cleanup = cis_cleanup; break; case SCO_LINK: if (lmp_esco_capable(hdev)) conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); else conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; break; case 
ESCO_LINK: conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; break; } skb_queue_head_init(&conn->data_q); INIT_LIST_HEAD(&conn->chan_list); INIT_LIST_HEAD(&conn->link_list); INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout); atomic_set(&conn->refcnt, 0); hci_dev_hold(hdev); hci_conn_hash_add(hdev, conn); /* The SCO and eSCO connections will only be notified when their * setup has been completed. This is different to ACL links which * can be notified right away. */ if (conn->type != SCO_LINK && conn->type != ESCO_LINK) { if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); } hci_conn_init_sysfs(conn); return conn; } static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason) { if (!reason) reason = HCI_ERROR_REMOTE_USER_TERM; /* Due to race, SCO/ISO conn might be not established yet at this point, * and nothing else will clean it up. In other cases it is done via HCI * events. */ switch (conn->type) { case SCO_LINK: case ESCO_LINK: if (HCI_CONN_HANDLE_UNSET(conn->handle)) hci_conn_failed(conn, reason); break; case ISO_LINK: if (conn->state != BT_CONNECTED && !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) hci_conn_failed(conn, reason); break; } } static void hci_conn_unlink(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "hcon %p", conn); if (!conn->parent) { struct hci_link *link, *t; list_for_each_entry_safe(link, t, &conn->link_list, list) { struct hci_conn *child = link->conn; hci_conn_unlink(child); /* If hdev is down it means * hci_dev_close_sync/hci_conn_hash_flush is in progress * and links don't need to be cleanup as all connections * would be cleanup. */ if (!test_bit(HCI_UP, &hdev->flags)) continue; hci_conn_cleanup_child(child, conn->abort_reason); } return; } if (!conn->link) return; list_del_rcu(&conn->link->list); synchronize_rcu(); hci_conn_drop(conn->parent); hci_conn_put(conn->parent); conn->parent = NULL; kfree(conn->link); conn->link = NULL; } void hci_conn_del(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); hci_conn_unlink(conn); cancel_delayed_work_sync(&conn->disc_work); cancel_delayed_work_sync(&conn->auto_accept_work); cancel_delayed_work_sync(&conn->idle_work); if (conn->type == ACL_LINK) { /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { cancel_delayed_work(&conn->le_conn_timeout); if (hdev->le_pkts) hdev->le_cnt += conn->sent; else hdev->acl_cnt += conn->sent; } else { /* Unacked ISO frames */ if (conn->type == ISO_LINK) { if (hdev->iso_pkts) hdev->iso_cnt += conn->sent; else if (hdev->le_pkts) hdev->le_cnt += conn->sent; else hdev->acl_cnt += conn->sent; } } if (conn->amp_mgr) amp_mgr_put(conn->amp_mgr); skb_queue_purge(&conn->data_q); /* Remove the connection from the list and cleanup its remaining * state. This is a separate function since for some cases like * BT_CONNECT_SCAN we *only* want the cleanup part without the * rest of hci_conn_del. 
*/ hci_conn_cleanup(conn); } struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) { int use_src = bacmp(src, BDADDR_ANY); struct hci_dev *hdev = NULL, *d; BT_DBG("%pMR -> %pMR", src, dst); read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (!test_bit(HCI_UP, &d->flags) || hci_dev_test_flag(d, HCI_USER_CHANNEL) || d->dev_type != HCI_PRIMARY) continue; /* Simple routing: * No source address - find interface with bdaddr != dst * Source address - find interface with bdaddr == src */ if (use_src) { bdaddr_t id_addr; u8 id_addr_type; if (src_type == BDADDR_BREDR) { if (!lmp_bredr_capable(d)) continue; bacpy(&id_addr, &d->bdaddr); id_addr_type = BDADDR_BREDR; } else { if (!lmp_le_capable(d)) continue; hci_copy_identity_address(d, &id_addr, &id_addr_type); /* Convert from HCI to three-value type */ if (id_addr_type == ADDR_LE_DEV_PUBLIC) id_addr_type = BDADDR_LE_PUBLIC; else id_addr_type = BDADDR_LE_RANDOM; } if (!bacmp(&id_addr, src) && id_addr_type == src_type) { hdev = d; break; } } else { if (bacmp(&d->bdaddr, dst)) { hdev = d; break; } } } if (hdev) hdev = hci_dev_hold(hdev); read_unlock(&hci_dev_list_lock); return hdev; } EXPORT_SYMBOL(hci_get_route); /* This function requires the caller holds hdev->lock */ static void hci_le_conn_failed(struct hci_conn *conn, u8 status) { struct hci_dev *hdev = conn->hdev; hci_connect_le_scan_cleanup(conn, status); /* Enable advertising in case this was a failed connection * attempt as a peripheral. */ hci_enable_advertising(hdev); } /* This function requires the caller holds hdev->lock */ void hci_conn_failed(struct hci_conn *conn, u8 status) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "status 0x%2.2x", status); switch (conn->type) { case LE_LINK: hci_le_conn_failed(conn, status); break; case ACL_LINK: mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); break; } conn->state = BT_CLOSED; hci_connect_cfm(conn, status); hci_conn_del(conn); } /* This function requires the caller holds hdev->lock */ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle); if (conn->handle == handle) return 0; if (handle > HCI_CONN_HANDLE_MAX) { bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, HCI_CONN_HANDLE_MAX); return HCI_ERROR_INVALID_PARAMETERS; } /* If abort_reason has been sent it means the connection is being * aborted and the handle shall not be changed. 
*/ if (conn->abort_reason) return conn->abort_reason; conn->handle = handle; return 0; } static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn; u16 handle = PTR_UINT(data); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) return; bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); if (!err) { hci_connect_le_scan_cleanup(conn, 0x00); goto done; } /* Check if connection is still pending */ if (conn != hci_lookup_le_connect(hdev)) goto done; /* Flush to make sure we send create conn cancel command if needed */ flush_delayed_work(&conn->le_conn_timeout); hci_conn_failed(conn, bt_status(err)); done: hci_dev_unlock(hdev); } static int hci_connect_le_sync(struct hci_dev *hdev, void *data) { struct hci_conn *conn; u16 handle = PTR_UINT(data); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) return 0; bt_dev_dbg(hdev, "conn %p", conn); clear_bit(HCI_CONN_SCANNING, &conn->flags); conn->state = BT_CONNECT; return hci_le_create_conn_sync(hdev, conn); } struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, bool dst_resolved, u8 sec_level, u16 conn_timeout, u8 role) { struct hci_conn *conn; struct smp_irk *irk; int err; /* Let's make sure that le is enabled.*/ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (lmp_le_capable(hdev)) return ERR_PTR(-ECONNREFUSED); return ERR_PTR(-EOPNOTSUPP); } /* Since the controller supports only one LE connection attempt at a * time, we return -EBUSY if there is any connection attempt running. */ if (hci_lookup_le_connect(hdev)) return ERR_PTR(-EBUSY); /* If there's already a connection object but it's not in * scanning state it means it must already be established, in * which case we can't do anything else except report a failure * to connect. */ conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) { return ERR_PTR(-EBUSY); } /* Check if the destination address has been resolved by the controller * since if it did then the identity address shall be used. */ if (!dst_resolved) { /* When given an identity address with existing identity * resolving key, the connection needs to be established * to a resolvable random address. * * Storing the resolvable random address is required here * to handle connection failures. The address will later * be resolved back into the original identity address * from the connect request. 
*/ irk = hci_find_irk_by_addr(hdev, dst, dst_type); if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { dst = &irk->rpa; dst_type = ADDR_LE_DEV_RANDOM; } } if (conn) { bacpy(&conn->dst, dst); } else { conn = hci_conn_add(hdev, LE_LINK, dst, role); if (!conn) return ERR_PTR(-ENOMEM); hci_conn_hold(conn); conn->pending_sec_level = sec_level; } conn->dst_type = dst_type; conn->sec_level = BT_SECURITY_LOW; conn->conn_timeout = conn_timeout; err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, UINT_PTR(conn->handle), create_le_conn_complete); if (err) { hci_conn_del(conn); return ERR_PTR(err); } return conn; } static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) { struct hci_conn *conn; conn = hci_conn_hash_lookup_le(hdev, addr, type); if (!conn) return false; if (conn->state != BT_CONNECTED) return false; return true; } /* This function requires the caller holds hdev->lock */ static int hci_explicit_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; if (is_connected(hdev, addr, addr_type)) return -EISCONN; params = hci_conn_params_lookup(hdev, addr, addr_type); if (!params) { params = hci_conn_params_add(hdev, addr, addr_type); if (!params) return -ENOMEM; /* If we created new params, mark them to be deleted in * hci_connect_le_scan_cleanup. It's different case than * existing disabled params, those will stay after cleanup. */ params->auto_connect = HCI_AUTO_CONN_EXPLICIT; } /* We're trying to connect, so make sure params are at pend_le_conns */ if (params->auto_connect == HCI_AUTO_CONN_DISABLED || params->auto_connect == HCI_AUTO_CONN_REPORT || params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { hci_pend_le_list_del_init(params); hci_pend_le_list_add(params, &hdev->pend_le_conns); } params->explicit_connect = true; BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type, params->auto_connect); return 0; } static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) { struct hci_conn *conn; u8 big; /* Allocate a BIG if not set */ if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { for (big = 0x00; big < 0xef; big++) { conn = hci_conn_hash_lookup_big(hdev, big); if (!conn) break; } if (big == 0xef) return -EADDRNOTAVAIL; /* Update BIG */ qos->bcast.big = big; } return 0; } static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) { struct hci_conn *conn; u8 bis; /* Allocate BIS if not set */ if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { /* Find an unused adv set to advertise BIS, skip instance 0x00 * since it is reserved as general purpose set. 
*/ for (bis = 0x01; bis < hdev->le_num_of_adv_sets; bis++) { conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis); if (!conn) break; } if (bis == hdev->le_num_of_adv_sets) return -EADDRNOTAVAIL; /* Update BIS */ qos->bcast.bis = bis; } return 0; } /* This function requires the caller holds hdev->lock */ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, struct bt_iso_qos *qos, __u8 base_len, __u8 *base) { struct hci_conn *conn; int err; /* Let's make sure that le is enabled.*/ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (lmp_le_capable(hdev)) return ERR_PTR(-ECONNREFUSED); return ERR_PTR(-EOPNOTSUPP); } err = qos_set_big(hdev, qos); if (err) return ERR_PTR(err); err = qos_set_bis(hdev, qos); if (err) return ERR_PTR(err); /* Check if the LE Create BIG command has already been sent */ conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big, qos->bcast.big); if (conn) return ERR_PTR(-EADDRINUSE); /* Check BIS settings against other bound BISes, since all * BISes in a BIG must have the same value for all parameters */ conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big); if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) || base_len != conn->le_per_adv_data_len || memcmp(conn->le_per_adv_data, base, base_len))) return ERR_PTR(-EADDRINUSE); conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); if (!conn) return ERR_PTR(-ENOMEM); conn->state = BT_CONNECT; hci_conn_hold(conn); return conn; } /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, enum conn_reasons conn_reason) { struct hci_conn *conn; /* Let's make sure that le is enabled.*/ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (lmp_le_capable(hdev)) return ERR_PTR(-ECONNREFUSED); return ERR_PTR(-EOPNOTSUPP); } /* Some devices send ATT messages as soon as the physical link is * established. To be able to handle these ATT messages, the user- * space first establishes the connection and then starts the pairing * process. * * So if a hci_conn object already exists for the following connection * attempt, we simply update pending_sec_level and auth_type fields * and return the object found. 
*/ conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); if (conn) { if (conn->pending_sec_level < sec_level) conn->pending_sec_level = sec_level; goto done; } BT_DBG("requesting refresh of dst_addr"); conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER); if (!conn) return ERR_PTR(-ENOMEM); if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { hci_conn_del(conn); return ERR_PTR(-EBUSY); } conn->state = BT_CONNECT; set_bit(HCI_CONN_SCANNING, &conn->flags); conn->dst_type = dst_type; conn->sec_level = BT_SECURITY_LOW; conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; conn->conn_reason = conn_reason; hci_update_passive_scan(hdev); done: hci_conn_hold(conn); return conn; } struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type, enum conn_reasons conn_reason) { struct hci_conn *acl; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (lmp_bredr_capable(hdev)) return ERR_PTR(-ECONNREFUSED); return ERR_PTR(-EOPNOTSUPP); } acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (!acl) { acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); if (!acl) return ERR_PTR(-ENOMEM); } hci_conn_hold(acl); acl->conn_reason = conn_reason; if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { acl->sec_level = BT_SECURITY_LOW; acl->pending_sec_level = sec_level; acl->auth_type = auth_type; hci_acl_create_connection(acl); } return acl; } static struct hci_link *hci_conn_link(struct hci_conn *parent, struct hci_conn *conn) { struct hci_dev *hdev = parent->hdev; struct hci_link *link; bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn); if (conn->link) return conn->link; if (conn->parent) return NULL; link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return NULL; link->conn = hci_conn_hold(conn); conn->link = link; conn->parent = hci_conn_get(parent); /* Use list_add_tail_rcu append to the list */ list_add_tail_rcu(&link->list, &parent->link_list); return link; } struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec) { struct hci_conn *acl; struct hci_conn *sco; struct hci_link *link; acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, CONN_REASON_SCO_CONNECT); if (IS_ERR(acl)) return acl; sco = hci_conn_hash_lookup_ba(hdev, type, dst); if (!sco) { sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER); if (!sco) { hci_conn_drop(acl); return ERR_PTR(-ENOMEM); } } link = hci_conn_link(acl, sco); if (!link) { hci_conn_drop(acl); hci_conn_drop(sco); return ERR_PTR(-ENOLINK); } sco->setting = setting; sco->codec = *codec; if (acl->state == BT_CONNECTED && (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { set_bit(HCI_CONN_POWER_SAVE, &acl->flags); hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) { /* defer SCO setup until mode change completed */ set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags); return sco; } hci_sco_setup(acl, 0x00); } return sco; } static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) { struct hci_dev *hdev = conn->hdev; struct hci_cp_le_create_big cp; struct iso_list_data data; memset(&cp, 0, sizeof(cp)); data.big = qos->bcast.big; data.bis = qos->bcast.bis; data.count = 0; /* Create a BIS for each bound connection */ hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data); cp.handle = qos->bcast.big; cp.adv_handle = qos->bcast.bis; cp.num_bis = data.count; hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); cp.bis.sdu = 
cpu_to_le16(qos->bcast.out.sdu); cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); cp.bis.rtn = qos->bcast.out.rtn; cp.bis.phy = qos->bcast.out.phy; cp.bis.packing = qos->bcast.packing; cp.bis.framing = qos->bcast.framing; cp.bis.encryption = qos->bcast.encryption; memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); } static int set_cig_params_sync(struct hci_dev *hdev, void *data) { u8 cig_id = PTR_UINT(data); struct hci_conn *conn; struct bt_iso_qos *qos; struct iso_cig_params pdu; u8 cis_id; conn = hci_conn_hash_lookup_cig(hdev, cig_id); if (!conn) return 0; memset(&pdu, 0, sizeof(pdu)); qos = &conn->iso_qos; pdu.cp.cig_id = cig_id; hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval); hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval); pdu.cp.sca = qos->ucast.sca; pdu.cp.packing = qos->ucast.packing; pdu.cp.framing = qos->ucast.framing; pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency); pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency); /* Reprogram all CIS(s) with the same CIG, valid range are: * num_cis: 0x00 to 0x1F * cis_id: 0x00 to 0xEF */ for (cis_id = 0x00; cis_id < 0xf0 && pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) { struct hci_cis_params *cis; conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id); if (!conn) continue; qos = &conn->iso_qos; cis = &pdu.cis[pdu.cp.num_cis++]; cis->cis_id = cis_id; cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu); cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu); cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy; cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy; cis->c_rtn = qos->ucast.out.rtn; cis->p_rtn = qos->ucast.in.rtn; } if (!pdu.cp.num_cis) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, sizeof(pdu.cp) + pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu, HCI_CMD_TIMEOUT); } static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) { struct hci_dev *hdev = conn->hdev; struct iso_list_data data; memset(&data, 0, sizeof(data)); /* Allocate first still reconfigurable CIG if not set */ if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { data.count = 0; hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &data); if (data.count) continue; hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &data); if (!data.count) break; } if (data.cig == 0xf0) return false; /* Update CIG */ qos->ucast.cig = data.cig; } if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) { if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig, qos->ucast.cis)) return false; goto done; } /* Allocate first available CIS if not set */ for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0; data.cis++) { if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig, data.cis)) { /* Update CIS */ qos->ucast.cis = data.cis; break; } } if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) return false; done: if (hci_cmd_sync_queue(hdev, set_cig_params_sync, UINT_PTR(qos->ucast.cig), NULL) < 0) return false; return true; } struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos) { struct hci_conn *cis; cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, qos->ucast.cis); if (!cis) { cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); if (!cis) return ERR_PTR(-ENOMEM); cis->cleanup = cis_cleanup; cis->dst_type = dst_type; cis->iso_qos.ucast.cig = 
BT_ISO_QOS_CIG_UNSET; cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET; } if (cis->state == BT_CONNECTED) return cis; /* Check if CIS has been set and the settings matches */ if (cis->state == BT_BOUND && !memcmp(&cis->iso_qos, qos, sizeof(*qos))) return cis; /* Update LINK PHYs according to QoS preference */ cis->le_tx_phy = qos->ucast.out.phy; cis->le_rx_phy = qos->ucast.in.phy; /* If output interval is not set use the input interval as it cannot be * 0x000000. */ if (!qos->ucast.out.interval) qos->ucast.out.interval = qos->ucast.in.interval; /* If input interval is not set use the output interval as it cannot be * 0x000000. */ if (!qos->ucast.in.interval) qos->ucast.in.interval = qos->ucast.out.interval; /* If output latency is not set use the input latency as it cannot be * 0x0000. */ if (!qos->ucast.out.latency) qos->ucast.out.latency = qos->ucast.in.latency; /* If input latency is not set use the output latency as it cannot be * 0x0000. */ if (!qos->ucast.in.latency) qos->ucast.in.latency = qos->ucast.out.latency; if (!hci_le_set_cig_params(cis, qos)) { hci_conn_drop(cis); return ERR_PTR(-EINVAL); } hci_conn_hold(cis); cis->iso_qos = *qos; cis->state = BT_BOUND; return cis; } bool hci_iso_setup_path(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct hci_cp_le_setup_iso_path cmd; memset(&cmd, 0, sizeof(cmd)); if (conn->iso_qos.ucast.out.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x00; /* Input (Host to Controller) */ cmd.path = 0x00; /* HCI path if enabled */ cmd.codec = 0x03; /* Transparent Data */ if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), &cmd) < 0) return false; } if (conn->iso_qos.ucast.in.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x01; /* Output (Controller to Host) */ cmd.path = 0x00; /* HCI path if enabled */ cmd.codec = 0x03; /* Transparent Data */ if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), &cmd) < 0) return false; } return true; } int hci_conn_check_create_cis(struct hci_conn *conn) { if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY)) return -EINVAL; if (!conn->parent || conn->parent->state != BT_CONNECTED || conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle)) return 1; return 0; } static int hci_create_cis_sync(struct hci_dev *hdev, void *data) { return hci_le_create_cis_sync(hdev); } int hci_le_create_cis_pending(struct hci_dev *hdev) { struct hci_conn *conn; bool pending = false; rcu_read_lock(); list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) { rcu_read_unlock(); return -EBUSY; } if (!hci_conn_check_create_cis(conn)) pending = true; } rcu_read_unlock(); if (!pending) return 0; /* Queue Create CIS */ return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL); } static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn, struct bt_iso_io_qos *qos, __u8 phy) { /* Only set MTU if PHY is enabled */ if (!qos->sdu && qos->phy) { if (hdev->iso_mtu > 0) qos->sdu = hdev->iso_mtu; else if (hdev->le_mtu > 0) qos->sdu = hdev->le_mtu; else qos->sdu = hdev->acl_mtu; } /* Use the same PHY as ACL if set to any */ if (qos->phy == BT_ISO_PHY_ANY) qos->phy = phy; /* Use LE ACL connection interval if not set */ if (!qos->interval) /* ACL interval unit in 1.25 ms to us */ qos->interval = conn->le_conn_interval * 1250; /* Use LE ACL connection latency if not set */ if (!qos->latency) qos->latency = conn->le_conn_latency; } static int create_big_sync(struct hci_dev *hdev, void *data) { struct 
hci_conn *conn = data; struct bt_iso_qos *qos = &conn->iso_qos; u16 interval, sync_interval = 0; u32 flags = 0; int err; if (qos->bcast.out.phy == 0x02) flags |= MGMT_ADV_FLAG_SEC_2M; /* Align intervals */ interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor; if (qos->bcast.bis) sync_interval = interval * 4; err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, conn->le_per_adv_data, flags, interval, interval, sync_interval); if (err) return err; return hci_le_create_big(conn, &conn->iso_qos); } static void create_pa_complete(struct hci_dev *hdev, void *data, int err) { struct hci_cp_le_pa_create_sync *cp = data; bt_dev_dbg(hdev, ""); if (err) bt_dev_err(hdev, "Unable to create PA: %d", err); kfree(cp); } static int create_pa_sync(struct hci_dev *hdev, void *data) { struct hci_cp_le_pa_create_sync *cp = data; int err; err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC, sizeof(*cp), cp, HCI_CMD_TIMEOUT); if (err) { hci_dev_clear_flag(hdev, HCI_PA_SYNC); return err; } return hci_update_passive_scan_sync(hdev); } int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos) { struct hci_cp_le_pa_create_sync *cp; if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) return -EBUSY; cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) { hci_dev_clear_flag(hdev, HCI_PA_SYNC); return -ENOMEM; } cp->options = qos->bcast.options; cp->sid = sid; cp->addr_type = dst_type; bacpy(&cp->addr, dst); cp->skip = cpu_to_le16(qos->bcast.skip); cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); cp->sync_cte_type = qos->bcast.sync_cte_type; /* Queue start pa_create_sync and scan */ return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); } int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]) { struct _packed { struct hci_cp_le_big_create_sync cp; __u8 bis[0x11]; } pdu; int err; if (num_bis > sizeof(pdu.bis)) return -EINVAL; err = qos_set_big(hdev, qos); if (err) return err; if (hcon) hcon->iso_qos.bcast.big = qos->bcast.big; memset(&pdu, 0, sizeof(pdu)); pdu.cp.handle = qos->bcast.big; pdu.cp.sync_handle = cpu_to_le16(sync_handle); pdu.cp.encryption = qos->bcast.encryption; memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode)); pdu.cp.mse = qos->bcast.mse; pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout); pdu.cp.num_bis = num_bis; memcpy(pdu.bis, bis, num_bis); return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC, sizeof(pdu.cp) + num_bis, &pdu); } static void create_big_complete(struct hci_dev *hdev, void *data, int err) { struct hci_conn *conn = data; bt_dev_dbg(hdev, "conn %p", conn); if (err) { bt_dev_err(hdev, "Unable to create BIG: %d", err); hci_connect_cfm(conn, err); hci_conn_del(conn); } } struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, struct bt_iso_qos *qos, __u8 base_len, __u8 *base) { struct hci_conn *conn; __u8 eir[HCI_MAX_PER_AD_LENGTH]; if (base_len && base) base_len = eir_append_service_data(eir, 0, 0x1851, base, base_len); /* We need hci_conn object using the BDADDR_ANY as dst */ conn = hci_add_bis(hdev, dst, qos, base_len, eir); if (IS_ERR(conn)) return conn; /* Update LINK PHYs according to QoS preference */ conn->le_tx_phy = qos->bcast.out.phy; conn->le_tx_phy = qos->bcast.out.phy; /* Add Basic Announcement into Peridic Adv Data if BASE is set */ if (base_len && base) { memcpy(conn->le_per_adv_data, eir, sizeof(eir)); conn->le_per_adv_data_len = base_len; } 
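	/* Fill in any output QoS fields the caller left unset (SDU size, PHY,
	 * SDU interval, latency) before the connection is marked as bound.
	 */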
hci_iso_qos_setup(hdev, conn, &qos->bcast.out, conn->le_tx_phy ? conn->le_tx_phy : hdev->le_tx_def_phys); conn->iso_qos = *qos; conn->state = BT_BOUND; return conn; } static void bis_mark_per_adv(struct hci_conn *conn, void *data) { struct iso_list_data *d = data; /* Skip if not broadcast/ANY address */ if (bacmp(&conn->dst, BDADDR_ANY)) return; if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || d->bis != conn->iso_qos.bcast.bis) return; set_bit(HCI_CONN_PER_ADV, &conn->flags); } struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos, __u8 base_len, __u8 *base) { struct hci_conn *conn; int err; struct iso_list_data data; conn = hci_bind_bis(hdev, dst, qos, base_len, base); if (IS_ERR(conn)) return conn; data.big = qos->bcast.big; data.bis = qos->bcast.bis; /* Set HCI_CONN_PER_ADV for all bound connections, to mark that * the start periodic advertising and create BIG commands have * been queued */ hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK, BT_BOUND, &data); /* Queue start periodic advertising and create BIG */ err = hci_cmd_sync_queue(hdev, create_big_sync, conn, create_big_complete); if (err < 0) { hci_conn_drop(conn); return ERR_PTR(err); } return conn; } struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos) { struct hci_conn *le; struct hci_conn *cis; struct hci_link *link; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) le = hci_connect_le(hdev, dst, dst_type, false, BT_SECURITY_LOW, HCI_LE_CONN_TIMEOUT, HCI_ROLE_SLAVE); else le = hci_connect_le_scan(hdev, dst, dst_type, BT_SECURITY_LOW, HCI_LE_CONN_TIMEOUT, CONN_REASON_ISO_CONNECT); if (IS_ERR(le)) return le; hci_iso_qos_setup(hdev, le, &qos->ucast.out, le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); hci_iso_qos_setup(hdev, le, &qos->ucast.in, le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); cis = hci_bind_cis(hdev, dst, dst_type, qos); if (IS_ERR(cis)) { hci_conn_drop(le); return cis; } link = hci_conn_link(le, cis); if (!link) { hci_conn_drop(le); hci_conn_drop(cis); return ERR_PTR(-ENOLINK); } /* Link takes the refcount */ hci_conn_drop(cis); cis->state = BT_CONNECT; hci_le_create_cis_pending(hdev); return cis; } /* Check link security requirement */ int hci_conn_check_link_mode(struct hci_conn *conn) { BT_DBG("hcon %p", conn); /* In Secure Connections Only mode, it is required that Secure * Connections is used and the link is encrypted with AES-CCM * using a P-256 authenticated combination key. 
*/ if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) { if (!hci_conn_sc_enabled(conn) || !test_bit(HCI_CONN_AES_CCM, &conn->flags) || conn->key_type != HCI_LK_AUTH_COMBINATION_P256) return 0; } /* AES encryption is required for Level 4: * * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C * page 1319: * * 128-bit equivalent strength for link and encryption keys * required using FIPS approved algorithms (E0 not allowed, * SAFER+ not allowed, and P-192 not allowed; encryption key * not shortened) */ if (conn->sec_level == BT_SECURITY_FIPS && !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { bt_dev_err(conn->hdev, "Invalid security: Missing AES-CCM usage"); return 0; } if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; return 1; } /* Authenticate remote device */ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) { BT_DBG("hcon %p", conn); if (conn->pending_sec_level > sec_level) sec_level = conn->pending_sec_level; if (sec_level > conn->sec_level) conn->pending_sec_level = sec_level; else if (test_bit(HCI_CONN_AUTH, &conn->flags)) return 1; /* Make sure we preserve an existing MITM requirement*/ auth_type |= (conn->auth_type & 0x01); conn->auth_type = auth_type; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested cp; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); /* If we're already encrypted set the REAUTH_PEND flag, * otherwise set the ENCRYPT_PEND. */ if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); else set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); } return 0; } /* Encrypt the link */ static void hci_conn_encrypt(struct hci_conn *conn) { BT_DBG("hcon %p", conn); if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { struct hci_cp_set_conn_encrypt cp; cp.handle = cpu_to_le16(conn->handle); cp.encrypt = 0x01; hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } } /* Enable security */ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, bool initiator) { BT_DBG("hcon %p", conn); if (conn->type == LE_LINK) return smp_conn_security(conn, sec_level); /* For sdp we don't need the link key. */ if (sec_level == BT_SECURITY_SDP) return 1; /* For non 2.1 devices and low security level we don't need the link key. */ if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn)) return 1; /* For other security levels we need the link key. */ if (!test_bit(HCI_CONN_AUTH, &conn->flags)) goto auth; /* An authenticated FIPS approved combination key has sufficient * security for security level 4. */ if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 && sec_level == BT_SECURITY_FIPS) goto encrypt; /* An authenticated combination key has sufficient security for security level 3. */ if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 || conn->key_type == HCI_LK_AUTH_COMBINATION_P256) && sec_level == BT_SECURITY_HIGH) goto encrypt; /* An unauthenticated combination key has sufficient security for security level 1 and 2. */ if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 || conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) && (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) goto encrypt; /* A combination key has always sufficient security for the security levels 1 or 2. High security level requires the combination key is generated using maximum PIN code length (16). For pre 2.1 units. 
*/ if (conn->key_type == HCI_LK_COMBINATION && (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW || conn->pin_length == 16)) goto encrypt; auth: if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) return 0; if (initiator) set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); if (!hci_conn_auth(conn, sec_level, auth_type)) return 0; encrypt: if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { /* Ensure that the encryption key size has been read, * otherwise stall the upper layer responses. */ if (!conn->enc_key_size) return 0; /* Nothing else needed, all requirements are met */ return 1; } hci_conn_encrypt(conn); return 0; } EXPORT_SYMBOL(hci_conn_security); /* Check secure link requirement */ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level) { BT_DBG("hcon %p", conn); /* Accept if non-secure or higher security level is required */ if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS) return 1; /* Accept if secure or higher security level is already present */ if (conn->sec_level == BT_SECURITY_HIGH || conn->sec_level == BT_SECURITY_FIPS) return 1; /* Reject not secure link */ return 0; } EXPORT_SYMBOL(hci_conn_check_secure); /* Switch role */ int hci_conn_switch_role(struct hci_conn *conn, __u8 role) { BT_DBG("hcon %p", conn); if (role == conn->role) return 1; if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { struct hci_cp_switch_role cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = role; hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); } return 0; } EXPORT_SYMBOL(hci_conn_switch_role); /* Enter active mode */ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) { struct hci_dev *hdev = conn->hdev; BT_DBG("hcon %p mode %d", conn, conn->mode); if (conn->mode != HCI_CM_SNIFF) goto timer; if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active) goto timer; if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { struct hci_cp_exit_sniff_mode cp; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); } timer: if (hdev->idle_timeout > 0) queue_delayed_work(hdev->workqueue, &conn->idle_work, msecs_to_jiffies(hdev->idle_timeout)); } /* Drop all connection on the device */ void hci_conn_hash_flush(struct hci_dev *hdev) { struct list_head *head = &hdev->conn_hash.list; struct hci_conn *conn; BT_DBG("hdev %s", hdev->name); /* We should not traverse the list here, because hci_conn_del * can remove extra links, which may cause the list traversal * to hit items that have already been released. 
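 * Instead, repeatedly delete the first entry until the list is empty.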
*/ while ((conn = list_first_entry_or_null(head, struct hci_conn, list)) != NULL) { conn->state = BT_CLOSED; hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM); hci_conn_del(conn); } } /* Check pending connect attempts */ void hci_conn_check_pending(struct hci_dev *hdev) { struct hci_conn *conn; BT_DBG("hdev %s", hdev->name); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); if (conn) hci_acl_create_connection(conn); hci_dev_unlock(hdev); } static u32 get_link_mode(struct hci_conn *conn) { u32 link_mode = 0; if (conn->role == HCI_ROLE_MASTER) link_mode |= HCI_LM_MASTER; if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) link_mode |= HCI_LM_ENCRYPT; if (test_bit(HCI_CONN_AUTH, &conn->flags)) link_mode |= HCI_LM_AUTH; if (test_bit(HCI_CONN_SECURE, &conn->flags)) link_mode |= HCI_LM_SECURE; if (test_bit(HCI_CONN_FIPS, &conn->flags)) link_mode |= HCI_LM_FIPS; return link_mode; } int hci_get_conn_list(void __user *arg) { struct hci_conn *c; struct hci_conn_list_req req, *cl; struct hci_conn_info *ci; struct hci_dev *hdev; int n = 0, size, err; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci)) return -EINVAL; size = sizeof(req) + req.conn_num * sizeof(*ci); cl = kmalloc(size, GFP_KERNEL); if (!cl) return -ENOMEM; hdev = hci_dev_get(req.dev_id); if (!hdev) { kfree(cl); return -ENODEV; } ci = cl->conn_info; hci_dev_lock(hdev); list_for_each_entry(c, &hdev->conn_hash.list, list) { bacpy(&(ci + n)->bdaddr, &c->dst); (ci + n)->handle = c->handle; (ci + n)->type = c->type; (ci + n)->out = c->out; (ci + n)->state = c->state; (ci + n)->link_mode = get_link_mode(c); if (++n >= req.conn_num) break; } hci_dev_unlock(hdev); cl->dev_id = hdev->id; cl->conn_num = n; size = sizeof(req) + n * sizeof(*ci); hci_dev_put(hdev); err = copy_to_user(arg, cl, size); kfree(cl); return err ? -EFAULT : 0; } int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) { struct hci_conn_info_req req; struct hci_conn_info ci; struct hci_conn *conn; char __user *ptr = arg + sizeof(req); if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); if (conn) { bacpy(&ci.bdaddr, &conn->dst); ci.handle = conn->handle; ci.type = conn->type; ci.out = conn->out; ci.state = conn->state; ci.link_mode = get_link_mode(conn); } hci_dev_unlock(hdev); if (!conn) return -ENOENT; return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; } int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) { struct hci_auth_info_req req; struct hci_conn *conn; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); if (conn) req.type = conn->auth_type; hci_dev_unlock(hdev); if (!conn) return -ENOENT; return copy_to_user(arg, &req, sizeof(req)) ? 
-EFAULT : 0; } struct hci_chan *hci_chan_create(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct hci_chan *chan; BT_DBG("%s hcon %p", hdev->name, conn); if (test_bit(HCI_CONN_DROP, &conn->flags)) { BT_DBG("Refusing to create new hci_chan"); return NULL; } chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return NULL; chan->conn = hci_conn_get(conn); skb_queue_head_init(&chan->data_q); chan->state = BT_CONNECTED; list_add_rcu(&chan->list, &conn->chan_list); return chan; } void hci_chan_del(struct hci_chan *chan) { struct hci_conn *conn = chan->conn; struct hci_dev *hdev = conn->hdev; BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan); list_del_rcu(&chan->list); synchronize_rcu(); /* Prevent new hci_chan's to be created for this hci_conn */ set_bit(HCI_CONN_DROP, &conn->flags); hci_conn_put(conn); skb_queue_purge(&chan->data_q); kfree(chan); } void hci_chan_list_flush(struct hci_conn *conn) { struct hci_chan *chan, *n; BT_DBG("hcon %p", conn); list_for_each_entry_safe(chan, n, &conn->chan_list, list) hci_chan_del(chan); } static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon, __u16 handle) { struct hci_chan *hchan; list_for_each_entry(hchan, &hcon->chan_list, list) { if (hchan->handle == handle) return hchan; } return NULL; } struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *hcon; struct hci_chan *hchan = NULL; rcu_read_lock(); list_for_each_entry_rcu(hcon, &h->list, list) { hchan = __hci_chan_lookup_handle(hcon, handle); if (hchan) break; } rcu_read_unlock(); return hchan; } u32 hci_conn_get_phy(struct hci_conn *conn) { u32 phys = 0; /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471: * Table 6.2: Packets defined for synchronous, asynchronous, and * CPB logical transport types. */ switch (conn->type) { case SCO_LINK: /* SCO logical transport (1 Mb/s): * HV1, HV2, HV3 and DV. */ phys |= BT_PHY_BR_1M_1SLOT; break; case ACL_LINK: /* ACL logical transport (1 Mb/s) ptt=0: * DH1, DM3, DH3, DM5 and DH5. */ phys |= BT_PHY_BR_1M_1SLOT; if (conn->pkt_type & (HCI_DM3 | HCI_DH3)) phys |= BT_PHY_BR_1M_3SLOT; if (conn->pkt_type & (HCI_DM5 | HCI_DH5)) phys |= BT_PHY_BR_1M_5SLOT; /* ACL logical transport (2 Mb/s) ptt=1: * 2-DH1, 2-DH3 and 2-DH5. */ if (!(conn->pkt_type & HCI_2DH1)) phys |= BT_PHY_EDR_2M_1SLOT; if (!(conn->pkt_type & HCI_2DH3)) phys |= BT_PHY_EDR_2M_3SLOT; if (!(conn->pkt_type & HCI_2DH5)) phys |= BT_PHY_EDR_2M_5SLOT; /* ACL logical transport (3 Mb/s) ptt=1: * 3-DH1, 3-DH3 and 3-DH5. 
*/ if (!(conn->pkt_type & HCI_3DH1)) phys |= BT_PHY_EDR_3M_1SLOT; if (!(conn->pkt_type & HCI_3DH3)) phys |= BT_PHY_EDR_3M_3SLOT; if (!(conn->pkt_type & HCI_3DH5)) phys |= BT_PHY_EDR_3M_5SLOT; break; case ESCO_LINK: /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */ phys |= BT_PHY_BR_1M_1SLOT; if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5))) phys |= BT_PHY_BR_1M_3SLOT; /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */ if (!(conn->pkt_type & ESCO_2EV3)) phys |= BT_PHY_EDR_2M_1SLOT; if (!(conn->pkt_type & ESCO_2EV5)) phys |= BT_PHY_EDR_2M_3SLOT; /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */ if (!(conn->pkt_type & ESCO_3EV3)) phys |= BT_PHY_EDR_3M_1SLOT; if (!(conn->pkt_type & ESCO_3EV5)) phys |= BT_PHY_EDR_3M_3SLOT; break; case LE_LINK: if (conn->le_tx_phy & HCI_LE_SET_PHY_1M) phys |= BT_PHY_LE_1M_TX; if (conn->le_rx_phy & HCI_LE_SET_PHY_1M) phys |= BT_PHY_LE_1M_RX; if (conn->le_tx_phy & HCI_LE_SET_PHY_2M) phys |= BT_PHY_LE_2M_TX; if (conn->le_rx_phy & HCI_LE_SET_PHY_2M) phys |= BT_PHY_LE_2M_RX; if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED) phys |= BT_PHY_LE_CODED_TX; if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED) phys |= BT_PHY_LE_CODED_RX; break; } return phys; } static int abort_conn_sync(struct hci_dev *hdev, void *data) { struct hci_conn *conn; u16 handle = PTR_UINT(data); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) return 0; return hci_abort_conn_sync(hdev, conn, conn->abort_reason); } int hci_abort_conn(struct hci_conn *conn, u8 reason) { struct hci_dev *hdev = conn->hdev; /* If abort_reason has already been set it means the connection is * already being aborted so don't attempt to overwrite it. */ if (conn->abort_reason) return 0; bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason); conn->abort_reason = reason; /* If the connection is pending check the command opcode since that * might be blocking on hci_cmd_sync_work while waiting its respective * event so we need to hci_cmd_sync_cancel to cancel it. * * hci_connect_le serializes the connection attempts so only one * connection can be in BT_CONNECT at time. */ if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) { switch (hci_skb_event(hdev->sent_cmd)) { case HCI_EV_LE_CONN_COMPLETE: case HCI_EV_LE_ENHANCED_CONN_COMPLETE: case HCI_EVT_LE_CIS_ESTABLISHED: hci_cmd_sync_cancel(hdev, -ECANCELED); break; } } return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle), NULL); }
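Several helpers above (create_le_conn_complete(), hci_connect_le_sync(), abort_conn_sync() and remove_cig_sync()) queue deferred work through hci_cmd_sync_queue() and pass only the 16-bit connection handle, encoded into the opaque data pointer with UINT_PTR(), rather than the hci_conn pointer itself. The callback decodes it with PTR_UINT() and re-resolves it via hci_conn_hash_lookup_handle(), so a connection that has been freed in the meantime is simply not found instead of being dereferenced. A minimal sketch of the same pattern follows; the example_* names are placeholders, not part of hci_conn.c.

static int example_handle_sync(struct hci_dev *hdev, void *data)
{
	u16 handle = PTR_UINT(data);
	struct hci_conn *conn;

	/* Re-resolve the handle; the connection may already be gone */
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	/* ... operate on conn from the cmd_sync context ... */
	return 0;
}

static int example_queue_by_handle(struct hci_dev *hdev, struct hci_conn *conn)
{
	/* Encode the handle rather than passing the hci_conn pointer */
	return hci_cmd_sync_queue(hdev, example_handle_sync,
				  UINT_PTR(conn->handle), NULL);
}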
linux-master
net/bluetooth/hci_conn.c
// SPDX-License-Identifier: GPL-2.0 /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2022 Intel Corporation * Copyright 2023 NXP */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/iso.h> static const struct proto_ops iso_sock_ops; static struct bt_sock_list iso_sk_list = { .lock = __RW_LOCK_UNLOCKED(iso_sk_list.lock) }; /* ---- ISO connections ---- */ struct iso_conn { struct hci_conn *hcon; /* @lock: spinlock protecting changes to iso_conn fields */ spinlock_t lock; struct sock *sk; struct delayed_work timeout_work; struct sk_buff *rx_skb; __u32 rx_len; __u16 tx_sn; }; #define iso_conn_lock(c) spin_lock(&(c)->lock) #define iso_conn_unlock(c) spin_unlock(&(c)->lock) static void iso_sock_close(struct sock *sk); static void iso_sock_kill(struct sock *sk); /* ----- ISO socket info ----- */ #define iso_pi(sk) ((struct iso_pinfo *)sk) #define EIR_SERVICE_DATA_LENGTH 4 #define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH) /* iso_pinfo flags values */ enum { BT_SK_BIG_SYNC, BT_SK_PA_SYNC, }; struct iso_pinfo { struct bt_sock bt; bdaddr_t src; __u8 src_type; bdaddr_t dst; __u8 dst_type; __u8 bc_sid; __u8 bc_num_bis; __u8 bc_bis[ISO_MAX_NUM_BIS]; __u16 sync_handle; unsigned long flags; struct bt_iso_qos qos; bool qos_user_set; __u8 base_len; __u8 base[BASE_MAX_LENGTH]; struct iso_conn *conn; }; static struct bt_iso_qos default_qos; static bool check_ucast_qos(struct bt_iso_qos *qos); static bool check_bcast_qos(struct bt_iso_qos *qos); static bool iso_match_sid(struct sock *sk, void *data); static void iso_sock_disconn(struct sock *sk); /* ---- ISO timers ---- */ #define ISO_CONN_TIMEOUT (HZ * 40) #define ISO_DISCONN_TIMEOUT (HZ * 2) static void iso_sock_timeout(struct work_struct *work) { struct iso_conn *conn = container_of(work, struct iso_conn, timeout_work.work); struct sock *sk; iso_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); iso_conn_unlock(conn); if (!sk) return; BT_DBG("sock %p state %d", sk, sk->sk_state); lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); release_sock(sk); sock_put(sk); } static void iso_sock_set_timer(struct sock *sk, long timeout) { if (!iso_pi(sk)->conn) return; BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); schedule_delayed_work(&iso_pi(sk)->conn->timeout_work, timeout); } static void iso_sock_clear_timer(struct sock *sk) { if (!iso_pi(sk)->conn) return; BT_DBG("sock %p state %d", sk, sk->sk_state); cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); } /* ---- ISO connections ---- */ static struct iso_conn *iso_conn_add(struct hci_conn *hcon) { struct iso_conn *conn = hcon->iso_data; if (conn) { if (!conn->hcon) conn->hcon = hcon; return conn; } conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) return NULL; spin_lock_init(&conn->lock); INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout); hcon->iso_data = conn; conn->hcon = hcon; conn->tx_sn = 0; BT_DBG("hcon %p conn %p", hcon, conn); return conn; } /* Delete channel. Must be called on the locked socket. 
*/ static void iso_chan_del(struct sock *sk, int err) { struct iso_conn *conn; struct sock *parent; conn = iso_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { iso_conn_lock(conn); conn->sk = NULL; iso_pi(sk)->conn = NULL; iso_conn_unlock(conn); if (conn->hcon) hci_conn_drop(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; parent = bt_sk(sk)->parent; if (parent) { bt_accept_unlink(sk); parent->sk_data_ready(parent); } else { sk->sk_state_change(sk); } sock_set_flag(sk, SOCK_ZAPPED); } static void iso_conn_del(struct hci_conn *hcon, int err) { struct iso_conn *conn = hcon->iso_data; struct sock *sk; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ iso_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); iso_conn_unlock(conn); if (sk) { lock_sock(sk); iso_sock_clear_timer(sk); iso_chan_del(sk, err); release_sock(sk); sock_put(sk); } /* Ensure no more work items will run before freeing conn. */ cancel_delayed_work_sync(&conn->timeout_work); hcon->iso_data = NULL; kfree(conn); } static int __iso_chan_add(struct iso_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); if (iso_pi(sk)->conn == conn && conn->sk == sk) return 0; if (conn->sk) { BT_ERR("conn->sk already set"); return -EBUSY; } iso_pi(sk)->conn = conn; conn->sk = sk; if (parent) bt_accept_enqueue(parent, sk, true); return 0; } static int iso_chan_add(struct iso_conn *conn, struct sock *sk, struct sock *parent) { int err; iso_conn_lock(conn); err = __iso_chan_add(conn, sk, parent); iso_conn_unlock(conn); return err; } static inline u8 le_addr_type(u8 bdaddr_type) { if (bdaddr_type == BDADDR_LE_PUBLIC) return ADDR_LE_DEV_PUBLIC; else return ADDR_LE_DEV_RANDOM; } static int iso_connect_bis(struct sock *sk) { struct iso_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR", &iso_pi(sk)->src); hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!bis_capable(hdev)) { err = -EOPNOTSUPP; goto unlock; } /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; err = -EINVAL; goto unlock; } /* Fail if out PHYs are marked as disabled */ if (!iso_pi(sk)->qos.bcast.out.phy) { err = -EINVAL; goto unlock; } /* Just bind if DEFER_SETUP has been set */ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, &iso_pi(sk)->qos, iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } else { hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), &iso_pi(sk)->qos, iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } conn = iso_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } lock_sock(sk); err = iso_chan_add(conn, sk, NULL); if (err) { release_sock(sk); goto unlock; } /* Update source addr of the socket */ bacpy(&iso_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECT; } else { sk->sk_state = BT_CONNECT; iso_sock_set_timer(sk, sk->sk_sndtimeo); } release_sock(sk); unlock: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static int iso_connect_cis(struct sock *sk) { struct iso_conn *conn; struct hci_conn 
*hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR -> %pMR", &iso_pi(sk)->src, &iso_pi(sk)->dst); hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!cis_central_capable(hdev)) { err = -EOPNOTSUPP; goto unlock; } /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; err = -EINVAL; goto unlock; } /* Fail if either PHYs are marked as disabled */ if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) { err = -EINVAL; goto unlock; } /* Just bind if DEFER_SETUP has been set */ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), &iso_pi(sk)->qos); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } else { hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), &iso_pi(sk)->qos); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } conn = iso_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } lock_sock(sk); err = iso_chan_add(conn, sk, NULL); if (err) { release_sock(sk); goto unlock; } /* Update source addr of the socket */ bacpy(&iso_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECT; } else { sk->sk_state = BT_CONNECT; iso_sock_set_timer(sk, sk->sk_sndtimeo); } release_sock(sk); unlock: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk) { if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2) return &iso_pi(sk)->conn->hcon->iso_qos; return &iso_pi(sk)->qos; } static int iso_send_frame(struct sock *sk, struct sk_buff *skb) { struct iso_conn *conn = iso_pi(sk)->conn; struct bt_iso_qos *qos = iso_sock_get_qos(sk); struct hci_iso_data_hdr *hdr; int len = 0; BT_DBG("sk %p len %d", sk, skb->len); if (skb->len > qos->ucast.out.sdu) return -EMSGSIZE; len = skb->len; /* Push ISO data header */ hdr = skb_push(skb, HCI_ISO_DATA_HDR_SIZE); hdr->sn = cpu_to_le16(conn->tx_sn++); hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, HCI_ISO_STATUS_VALID)); if (sk->sk_state == BT_CONNECTED) hci_send_iso(conn->hcon, skb); else len = -ENOTCONN; return len; } static void iso_recv_frame(struct iso_conn *conn, struct sk_buff *skb) { struct sock *sk; iso_conn_lock(conn); sk = conn->sk; iso_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %d", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba) { struct sock *sk; sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&iso_pi(sk)->src, ba)) return sk; } return NULL; } static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc, __u8 sid) { struct sock *sk; sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (bacmp(&iso_pi(sk)->src, ba)) continue; if (bacmp(&iso_pi(sk)->dst, bc)) continue; if (iso_pi(sk)->bc_sid == sid) return sk; } return NULL; } typedef bool (*iso_sock_match_t)(struct sock *sk, void *data); /* Find socket listening: * source bdaddr (Unicast) * destination bdaddr (Broadcast only) * match func - pass NULL to ignore * 
match func data - pass -1 to ignore * Returns closest match. */ static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst, iso_sock_match_t match, void *data) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&iso_sk_list.lock); sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; /* Match Broadcast destination */ if (bacmp(dst, BDADDR_ANY) && bacmp(&iso_pi(sk)->dst, dst)) continue; /* Use Match function if provided */ if (match && !match(sk, data)) continue; /* Exact match. */ if (!bacmp(&iso_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&iso_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } read_unlock(&iso_sk_list.lock); return sk ? sk : sk1; } static void iso_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void iso_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { iso_sock_close(sk); iso_sock_kill(sk); } /* If listening socket stands for a PA sync connection, * properly disconnect the hcon and socket. */ if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon && test_bit(HCI_CONN_PA_SYNC, &iso_pi(parent)->conn->hcon->flags)) { iso_sock_disconn(parent); return; } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ static void iso_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&iso_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void iso_sock_disconn(struct sock *sk) { sk->sk_state = BT_DISCONN; iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT); iso_conn_lock(iso_pi(sk)->conn); hci_conn_drop(iso_pi(sk)->conn->hcon); iso_pi(sk)->conn->hcon = NULL; iso_conn_unlock(iso_pi(sk)->conn); } static void __iso_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: iso_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECTED: case BT_CONFIG: if (iso_pi(sk)->conn->hcon) iso_sock_disconn(sk); else iso_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: if (iso_pi(sk)->conn->hcon && (test_bit(HCI_CONN_PA_SYNC, &iso_pi(sk)->conn->hcon->flags) || test_bit(HCI_CONN_PA_SYNC_FAILED, &iso_pi(sk)->conn->hcon->flags))) iso_sock_disconn(sk); else iso_chan_del(sk, ECONNRESET); break; case BT_DISCONN: iso_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. 
*/ static void iso_sock_close(struct sock *sk) { iso_sock_clear_timer(sk); lock_sock(sk); __iso_sock_close(sk); release_sock(sk); iso_sock_kill(sk); } static void iso_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } } static struct proto iso_proto = { .name = "ISO", .owner = THIS_MODULE, .obj_size = sizeof(struct iso_pinfo) }; #define DEFAULT_IO_QOS \ { \ .interval = 10000u, \ .latency = 10u, \ .sdu = 40u, \ .phy = BT_ISO_PHY_2M, \ .rtn = 2u, \ } static struct bt_iso_qos default_qos = { .bcast = { .big = BT_ISO_QOS_BIG_UNSET, .bis = BT_ISO_QOS_BIS_UNSET, .sync_factor = 0x01, .packing = 0x00, .framing = 0x00, .in = DEFAULT_IO_QOS, .out = DEFAULT_IO_QOS, .encryption = 0x00, .bcode = {0x00}, .options = 0x00, .skip = 0x0000, .sync_timeout = 0x4000, .sync_cte_type = 0x00, .mse = 0x00, .timeout = 0x4000, }, }; static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; sk = bt_sock_alloc(net, sock, &iso_proto, proto, prio, kern); if (!sk) return NULL; sk->sk_destruct = iso_sock_destruct; sk->sk_sndtimeo = ISO_CONN_TIMEOUT; /* Set address type as public as default src address is BDADDR_ANY */ iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; iso_pi(sk)->qos = default_qos; bt_sock_link(&iso_sk_list, sk); return sk; } static int iso_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &iso_sock_ops; sk = iso_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; iso_sock_init(sk, NULL); return 0; } static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int i; BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid, sa->iso_bc->bc_num_bis); if (addr_len > sizeof(*sa) + sizeof(*sa->iso_bc) || sa->iso_bc->bc_num_bis < 0x01 || sa->iso_bc->bc_num_bis > 0x1f) return -EINVAL; bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr); iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; iso_pi(sk)->sync_handle = -1; iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; for (i = 0; i < iso_pi(sk)->bc_num_bis; i++) { if (sa->iso_bc->bc_bis[i] < 0x01 || sa->iso_bc->bc_bis[i] > 0x1f) return -EINVAL; memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, iso_pi(sk)->bc_num_bis); } return 0; } static int iso_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bdaddr, sa->iso_bdaddr_type); if (!addr || addr_len < sizeof(struct sockaddr_iso) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } /* Check if the address type is of LE type */ if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) { err = -EINVAL; goto done; } bacpy(&iso_pi(sk)->src, &sa->iso_bdaddr); iso_pi(sk)->src_type = sa->iso_bdaddr_type; /* Check for Broadcast address */ if (addr_len > sizeof(*sa)) { err = iso_sock_bind_bc(sock, addr, addr_len); if (err) goto done; } sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int 
iso_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_iso) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; /* Check if the address type is of LE type */ if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) return -EINVAL; lock_sock(sk); bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr); iso_pi(sk)->dst_type = sa->iso_bdaddr_type; release_sock(sk); if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) err = iso_connect_cis(sk); else err = iso_connect_bis(sk); if (err) return err; lock_sock(sk); if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); } release_sock(sk); return err; } static int iso_listen_bis(struct sock *sk) { struct hci_dev *hdev; int err = 0; BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid); write_lock(&iso_sk_list.lock); if (__iso_get_sock_listen_by_sid(&iso_pi(sk)->src, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid)) err = -EADDRINUSE; write_unlock(&iso_sk_list.lock); if (err) return err; hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; return -EINVAL; } err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); hci_dev_put(hdev); return err; } static int iso_listen_cis(struct sock *sk) { int err = 0; BT_DBG("%pMR", &iso_pi(sk)->src); write_lock(&iso_sk_list.lock); if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src)) err = -EADDRINUSE; write_unlock(&iso_sk_list.lock); return err; } static int iso_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) err = iso_listen_cis(sk); else err = iso_listen_bis(sk); if (err) goto done; sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int iso_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); done: release_sock(sk); return err; } static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; if (peer) { bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); sa->iso_bdaddr_type = iso_pi(sk)->dst_type; } else { bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); sa->iso_bdaddr_type = iso_pi(sk)->src_type; } return sizeof(struct sockaddr_iso); } static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb, **frag; size_t mtu; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state != BT_CONNECTED) { release_sock(sk); return -ENOTCONN; } mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu; release_sock(sk); skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0); if (IS_ERR(skb)) return PTR_ERR(skb); len -= skb->len; BT_DBG("skb %p len %d", sk, skb->len); /* Continuation fragments */ frag = &skb_shinfo(skb)->frag_list; while (len) { struct sk_buff *tmp; tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0); if (IS_ERR(tmp)) { kfree_skb(skb); return PTR_ERR(tmp); } *frag = tmp; len -= tmp->len; skb->len += tmp->len; skb->data_len += tmp->len; BT_DBG("frag %p len %d", *frag, tmp->len); frag = &(*frag)->next; } lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = iso_send_frame(sk, skb); else err = -ENOTCONN; release_sock(sk); if (err < 0) kfree_skb(skb); return err; } static void iso_conn_defer_accept(struct hci_conn *conn) { struct hci_cp_le_accept_cis cp; struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); } static void iso_conn_big_sync(struct sock *sk) { int err; struct hci_dev *hdev; hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return; if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon, &iso_pi(sk)->qos, iso_pi(sk)->sync_handle, iso_pi(sk)->bc_num_bis, iso_pi(sk)->bc_bis); if (err) bt_dev_err(hdev, "hci_le_big_create_sync: %d", err); } } static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct iso_pinfo *pi = iso_pi(sk); BT_DBG("sk %p", sk); if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { lock_sock(sk); switch (sk->sk_state) { case BT_CONNECT2: if (pi->conn->hcon && test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) { iso_conn_big_sync(sk); sk->sk_state = BT_LISTEN; set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } else { iso_conn_defer_accept(pi->conn->hcon); sk->sk_state = BT_CONFIG; } release_sock(sk); return 0; case BT_CONNECT: release_sock(sk); return iso_connect_cis(sk); default: release_sock(sk); break; } } return bt_sock_recvmsg(sock, msg, len, 
flags); } static bool check_io_qos(struct bt_iso_io_qos *qos) { /* If no PHY is enable SDU must be 0 */ if (!qos->phy && qos->sdu) return false; if (qos->interval && (qos->interval < 0xff || qos->interval > 0xfffff)) return false; if (qos->latency && (qos->latency < 0x05 || qos->latency > 0xfa0)) return false; if (qos->phy > BT_ISO_PHY_ANY) return false; return true; } static bool check_ucast_qos(struct bt_iso_qos *qos) { if (qos->ucast.cig > 0xef && qos->ucast.cig != BT_ISO_QOS_CIG_UNSET) return false; if (qos->ucast.cis > 0xef && qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) return false; if (qos->ucast.sca > 0x07) return false; if (qos->ucast.packing > 0x01) return false; if (qos->ucast.framing > 0x01) return false; if (!check_io_qos(&qos->ucast.in)) return false; if (!check_io_qos(&qos->ucast.out)) return false; return true; } static bool check_bcast_qos(struct bt_iso_qos *qos) { if (qos->bcast.sync_factor == 0x00) return false; if (qos->bcast.packing > 0x01) return false; if (qos->bcast.framing > 0x01) return false; if (!check_io_qos(&qos->bcast.in)) return false; if (!check_io_qos(&qos->bcast.out)) return false; if (qos->bcast.encryption > 0x01) return false; if (qos->bcast.options > 0x07) return false; if (qos->bcast.skip > 0x01f3) return false; if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000) return false; if (qos->bcast.sync_cte_type > 0x1f) return false; if (qos->bcast.mse > 0x1f) return false; if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) return false; return true; } static int iso_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_iso_qos qos = default_qos; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_PKT_STATUS: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); else clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); break; case BT_ISO_QOS: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } len = min_t(unsigned int, sizeof(qos), optlen); if (copy_from_sockptr(&qos, optval, len)) { err = -EFAULT; break; } if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) { err = -EINVAL; break; } iso_pi(sk)->qos = qos; iso_pi(sk)->qos_user_set = true; break; case BT_ISO_BASE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } if (optlen > sizeof(iso_pi(sk)->base)) { err = -EOVERFLOW; break; } len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen); if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) { err = -EFAULT; break; } iso_pi(sk)->base_len = len; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int iso_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_iso_qos *qos; u8 base_len; u8 *base; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state == BT_CONNECTED) { err = -EINVAL; break; } if 
(put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *)optval)) err = -EFAULT; break; case BT_PKT_STATUS: if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags), (int __user *)optval)) err = -EFAULT; break; case BT_ISO_QOS: qos = iso_sock_get_qos(sk); len = min_t(unsigned int, len, sizeof(*qos)); if (copy_to_user(optval, qos, len)) err = -EFAULT; break; case BT_ISO_BASE: if (sk->sk_state == BT_CONNECTED && !bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; base = iso_pi(sk)->conn->hcon->le_per_adv_data; } else { base_len = iso_pi(sk)->base_len; base = iso_pi(sk)->base; } len = min_t(unsigned int, len, base_len); if (copy_to_user(optval, base, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int iso_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p, how %d", sock, sk, how); if (!sk) return 0; sock_hold(sk); lock_sock(sk); switch (how) { case SHUT_RD: if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; sk->sk_shutdown |= RCV_SHUTDOWN; break; case SHUT_WR: if (sk->sk_shutdown & SEND_SHUTDOWN) goto unlock; sk->sk_shutdown |= SEND_SHUTDOWN; break; case SHUT_RDWR: if (sk->sk_shutdown & SHUTDOWN_MASK) goto unlock; sk->sk_shutdown |= SHUTDOWN_MASK; break; } iso_sock_clear_timer(sk); __iso_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); unlock: release_sock(sk); sock_put(sk); return err; } static int iso_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; iso_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); iso_sock_kill(sk); return err; } static void iso_sock_ready(struct sock *sk) { BT_DBG("sk %p", sk); if (!sk) return; lock_sock(sk); iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } struct iso_list_data { struct hci_conn *hcon; int count; }; static bool iso_match_big(struct sock *sk, void *data) { struct hci_evt_le_big_sync_estabilished *ev = data; return ev->handle == iso_pi(sk)->qos.bcast.big; } static bool iso_match_pa_sync_flag(struct sock *sk, void *data) { return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } static void iso_conn_ready(struct iso_conn *conn) { struct sock *parent = NULL; struct sock *sk = conn->sk; struct hci_ev_le_big_sync_estabilished *ev = NULL; struct hci_ev_le_pa_sync_established *ev2 = NULL; struct hci_conn *hcon; BT_DBG("conn %p", conn); if (sk) { iso_sock_ready(conn->sk); } else { hcon = conn->hcon; if (!hcon) return; if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags) || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { ev = hci_recv_event_data(hcon->hdev, HCI_EVT_LE_BIG_SYNC_ESTABILISHED); /* Get reference to PA sync parent socket, if it exists */ parent = iso_get_sock_listen(&hcon->src, &hcon->dst, iso_match_pa_sync_flag, NULL); if (!parent && ev) parent = iso_get_sock_listen(&hcon->src, &hcon->dst, iso_match_big, ev); } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) || test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { ev2 = hci_recv_event_data(hcon->hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev2) parent = iso_get_sock_listen(&hcon->src, &hcon->dst, iso_match_sid, ev2); } 
if (!parent) parent = iso_get_sock_listen(&hcon->src, BDADDR_ANY, NULL, NULL); if (!parent) return; lock_sock(parent); sk = iso_sock_alloc(sock_net(parent), NULL, BTPROTO_ISO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); return; } iso_sock_init(sk, parent); bacpy(&iso_pi(sk)->src, &hcon->src); /* Convert from HCI to three-value type */ if (hcon->src_type == ADDR_LE_DEV_PUBLIC) iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; else iso_pi(sk)->src_type = BDADDR_LE_RANDOM; /* If hcon has no destination address (BDADDR_ANY) it means it * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED or * HCI_EV_LE_PA_SYNC_ESTABLISHED so we need to initialize using * the parent socket destination address. */ if (!bacmp(&hcon->dst, BDADDR_ANY)) { bacpy(&hcon->dst, &iso_pi(parent)->dst); hcon->dst_type = iso_pi(parent)->dst_type; hcon->sync_handle = iso_pi(parent)->sync_handle; } if (ev2 && !ev2->status) { iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; iso_pi(sk)->qos = iso_pi(parent)->qos; iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis; memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS); } bacpy(&iso_pi(sk)->dst, &hcon->dst); iso_pi(sk)->dst_type = hcon->dst_type; iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); iso_pi(sk)->base_len = iso_pi(parent)->base_len; hci_conn_hold(hcon); iso_chan_add(conn, sk, parent); if ((ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) || (ev2 && ev2->status)) { /* Trigger error signal on child socket */ sk->sk_err = ECONNREFUSED; sk->sk_error_report(sk); } if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); } } static bool iso_match_sid(struct sock *sk, void *data) { struct hci_ev_le_pa_sync_established *ev = data; return ev->sid == iso_pi(sk)->bc_sid; } static bool iso_match_sync_handle(struct sock *sk, void *data) { struct hci_evt_le_big_info_adv_report *ev = data; return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; } static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data) { struct hci_ev_le_per_adv_report *ev = data; return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; } /* ----- ISO interface with lower layer (HCI) ----- */ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct hci_ev_le_pa_sync_established *ev1; struct hci_evt_le_big_info_adv_report *ev2; struct hci_ev_le_per_adv_report *ev3; struct sock *sk; int lm = 0; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); /* Broadcast receiver requires handling of some events before it can * proceed to establishing a BIG sync: * * 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific * SID to listen to and once sync is estabilished its handle needs to * be stored in iso_pi(sk)->sync_handle so it can be matched once * receiving the BIG Info. * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by a * a BIG Info it attempts to check if there any listening socket with * the same sync_handle and if it does then attempt to create a sync. * 3. HCI_EV_LE_PER_ADV_REPORT: When a PA report is received, it is stored * in iso_pi(sk)->base so it can be passed up to user, in the case of a * broadcast sink. 
*/ ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev1) { sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid, ev1); if (sk && !ev1->status) iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); goto done; } ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); if (ev2) { /* Try to get PA sync listening socket, if it exists */ sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_pa_sync_flag, NULL); if (!sk) sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sync_handle, ev2); if (sk) { int err; if (ev2->num_bis < iso_pi(sk)->bc_num_bis) iso_pi(sk)->bc_num_bis = ev2->num_bis; if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { err = hci_le_big_create_sync(hdev, NULL, &iso_pi(sk)->qos, iso_pi(sk)->sync_handle, iso_pi(sk)->bc_num_bis, iso_pi(sk)->bc_bis); if (err) { bt_dev_err(hdev, "hci_le_big_create_sync: %d", err); sk = NULL; } } } } ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT); if (ev3) { sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sync_handle_pa_report, ev3); if (sk) { memcpy(iso_pi(sk)->base, ev3->data, ev3->length); iso_pi(sk)->base_len = ev3->length; } } else { sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL); } done: if (!sk) return lm; lm |= HCI_LM_ACCEPT; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= HCI_PROTO_DEFER; return lm; } static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != ISO_LINK) { if (hcon->type != LE_LINK) return; /* Check if LE link has failed */ if (status) { struct hci_link *link, *t; list_for_each_entry_safe(link, t, &hcon->link_list, list) iso_conn_del(link->conn, bt_to_errno(status)); return; } /* Create CIS if pending */ hci_le_create_cis_pending(hcon->hdev); return; } BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED or * HCI_CONN_PA_SYNC_FAILED is set, queue the failed connection * into the accept queue of the listening socket and wake up * userspace, to inform the user about the event. */ if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags) || test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { struct iso_conn *conn; conn = iso_conn_add(hcon); if (conn) iso_conn_ready(conn); } else { iso_conn_del(hcon, bt_to_errno(status)); } } static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) { if (hcon->type != ISO_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); iso_conn_del(hcon, bt_to_errno(reason)); } void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct iso_conn *conn = hcon->iso_data; __u16 pb, ts, len; if (!conn) goto drop; pb = hci_iso_flags_pb(flags); ts = hci_iso_flags_ts(flags); BT_DBG("conn %p len %d pb 0x%x ts 0x%x", conn, skb->len, pb, ts); switch (pb) { case ISO_START: case ISO_SINGLE: if (conn->rx_len) { BT_ERR("Unexpected start frame (len %d)", skb->len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; } if (ts) { struct hci_iso_ts_data_hdr *hdr; /* TODO: add timestamp to the packet? 
*/ hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE); if (!hdr) { BT_ERR("Frame is too short (len %d)", skb->len); goto drop; } len = __le16_to_cpu(hdr->slen); } else { struct hci_iso_data_hdr *hdr; hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE); if (!hdr) { BT_ERR("Frame is too short (len %d)", skb->len); goto drop; } len = __le16_to_cpu(hdr->slen); } flags = hci_iso_data_flags(len); len = hci_iso_data_len(len); BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x", len, skb->len, flags); if (len == skb->len) { /* Complete frame received */ hci_skb_pkt_status(skb) = flags & 0x03; iso_recv_frame(conn, skb); return; } if (pb == ISO_SINGLE) { BT_ERR("Frame malformed (len %d, expected len %d)", skb->len, len); goto drop; } if (skb->len > len) { BT_ERR("Frame is too long (len %d, expected len %d)", skb->len, len); goto drop; } /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); if (!conn->rx_skb) goto drop; hci_skb_pkt_status(conn->rx_skb) = flags & 0x03; skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len = len - skb->len; break; case ISO_CONT: BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); if (!conn->rx_len) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); goto drop; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %d, expected %d)", skb->len, conn->rx_len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; goto drop; } skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; return; case ISO_END: skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; if (!conn->rx_len) { struct sk_buff *rx_skb = conn->rx_skb; /* Complete frame received. iso_recv_frame * takes ownership of the skb so set the global * rx_skb pointer to NULL first. 
*/ conn->rx_skb = NULL; iso_recv_frame(conn, rx_skb); } break; } drop: kfree_skb(skb); } static struct hci_cb iso_cb = { .name = "ISO", .connect_cfm = iso_connect_cfm, .disconn_cfm = iso_disconn_cfm, }; static int iso_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&iso_sk_list.lock); sk_for_each(sk, &iso_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &iso_pi(sk)->src, &iso_pi(sk)->dst, sk->sk_state); } read_unlock(&iso_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(iso_debugfs); static struct dentry *iso_debugfs; static const struct proto_ops iso_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = iso_sock_release, .bind = iso_sock_bind, .connect = iso_sock_connect, .listen = iso_sock_listen, .accept = iso_sock_accept, .getname = iso_sock_getname, .sendmsg = iso_sock_sendmsg, .recvmsg = iso_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = iso_sock_shutdown, .setsockopt = iso_sock_setsockopt, .getsockopt = iso_sock_getsockopt }; static const struct net_proto_family iso_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = iso_sock_create, }; static bool iso_inited; bool iso_enabled(void) { return iso_inited; } int iso_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); if (iso_inited) return -EALREADY; err = proto_register(&iso_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_ISO, &iso_sock_family_ops); if (err < 0) { BT_ERR("ISO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "iso", &iso_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create ISO proc file"); bt_sock_unregister(BTPROTO_ISO); goto error; } BT_INFO("ISO socket layer initialized"); hci_register_cb(&iso_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; if (!iso_debugfs) { iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, NULL, &iso_debugfs_fops); } iso_inited = true; return 0; error: proto_unregister(&iso_proto); return err; } int iso_exit(void) { if (!iso_inited) return -EALREADY; bt_procfs_cleanup(&init_net, "iso"); debugfs_remove(iso_debugfs); iso_debugfs = NULL; hci_unregister_cb(&iso_cb); bt_sock_unregister(BTPROTO_ISO); proto_unregister(&iso_proto); iso_inited = false; return 0; }
linux-master
net/bluetooth/iso.c
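/* Editor's note: the send/receive paths above (iso_send_frame and iso_recv)
 * pack a 14-bit SDU length together with a 2-bit packet-status field into the
 * little-endian "slen" word of the ISO data header. The helpers below are an
 * illustrative, stand-alone sketch of that packing scheme, not the kernel's
 * hci_iso_data_len_pack()/hci_iso_data_len()/hci_iso_data_flags() macros
 * themselves; the 14/2 bit split is assumed from the usage shown above.
 */
#include <stdint.h>
#include <stdio.h>

/* Pack a payload length (low 14 bits) with 2 status flag bits (top 2 bits). */
static uint16_t iso_slen_pack(uint16_t len, uint16_t flags)
{
	return (uint16_t)((len & 0x3fff) | ((flags & 0x03) << 14));
}

static uint16_t iso_slen_len(uint16_t slen)   { return slen & 0x3fff; }
static uint16_t iso_slen_flags(uint16_t slen) { return slen >> 14;    }

int main(void)
{
	/* 0x00 is assumed here to stand for the "valid data" status code */
	uint16_t slen = iso_slen_pack(40, 0x00);

	printf("slen=0x%04x len=%u flags=%u\n",
	       slen, iso_slen_len(slen), iso_slen_flags(slen));
	return 0;
}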
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "smp.h" #include "hci_request.h" #include "msft.h" #include "eir.h" void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); req->hdev = hdev; req->err = 0; } void hci_req_purge(struct hci_request *req) { skb_queue_purge(&req->cmd_q); } bool hci_req_status_pend(struct hci_dev *hdev) { return hdev->req_status == HCI_REQ_PEND; } static int req_run(struct hci_request *req, hci_req_complete_t complete, hci_req_complete_skb_t complete_skb) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; unsigned long flags; bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. */ if (req->err) { skb_queue_purge(&req->cmd_q); return req->err; } /* Do not allow empty requests */ if (skb_queue_empty(&req->cmd_q)) return -ENODATA; skb = skb_peek_tail(&req->cmd_q); if (complete) { bt_cb(skb)->hci.req_complete = complete; } else if (complete_skb) { bt_cb(skb)->hci.req_complete_skb = complete_skb; bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; } spin_lock_irqsave(&hdev->cmd_q.lock, flags); skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } int hci_req_run(struct hci_request *req, hci_req_complete_t complete) { return req_run(req, complete, NULL); } int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) { return req_run(req, NULL, complete); } void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, struct sk_buff *skb) { bt_dev_dbg(hdev, "result 0x%2.2x", result); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = result; hdev->req_status = HCI_REQ_DONE; if (skb) hdev->req_skb = skb_get(skb); wake_up_interruptible(&hdev->req_wait_q); } } /* Execute request and wait for completion. */ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { struct hci_request req; int err = 0; bt_dev_dbg(hdev, "start"); hci_req_init(&req, hdev); hdev->req_status = HCI_REQ_PEND; err = func(&req, opt); if (err) { if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } err = hci_req_run_skb(&req, hci_req_sync_complete); if (err < 0) { hdev->req_status = 0; /* ENODATA means the HCI request command queue is empty. 
* This can happen when a request with conditionals doesn't * trigger any commands to be sent. This is normal behavior * and should not trigger an error return. */ if (err == -ENODATA) { if (hci_status) *hci_status = 0; return 0; } if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } err = wait_event_interruptible_timeout(hdev->req_wait_q, hdev->req_status != HCI_REQ_PEND, timeout); if (err == -ERESTARTSYS) return -EINTR; switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); if (hci_status) *hci_status = hdev->req_result; break; case HCI_REQ_CANCELED: err = -hdev->req_result; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; default: err = -ETIMEDOUT; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; } kfree_skb(hdev->req_skb); hdev->req_skb = NULL; hdev->req_status = hdev->req_result = 0; bt_dev_dbg(hdev, "end: err %d", err); return err; } int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { int ret; /* Serialize all requests */ hci_req_sync_lock(hdev); /* check the state after obtaing the lock to protect the HCI_UP * against any races from hci_dev_do_close when the controller * gets removed. */ if (test_bit(HCI_UP, &hdev->flags)) ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); else ret = -ENETDOWN; hci_req_sync_unlock(hdev); return ret; } struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) return NULL; hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) skb_put_data(skb, param, plen); bt_dev_dbg(hdev, "skb len %d", skb->len); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_opcode(skb) = opcode; return skb; } /* Queue a command to an asynchronous HCI request */ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, const void *param, u8 event) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. 
*/ if (req->err) return; skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); req->err = -ENOMEM; return; } if (skb_queue_empty(&req->cmd_q)) bt_cb(skb)->hci.req_flags |= HCI_REQ_START; hci_skb_event(skb) = event; skb_queue_tail(&req->cmd_q, skb); } void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, const void *param) { bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode); hci_req_add_ev(req, opcode, plen, param, 0); } static void start_interleave_scan(struct hci_dev *hdev) { hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, 0); } static bool is_interleave_scanning(struct hci_dev *hdev) { return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; } static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); cancel_delayed_work_sync(&hdev->interleave_scan); hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; } /* Return true if interleave_scan wasn't started until exiting this function, * otherwise, return false */ static bool __hci_update_interleaved_scan(struct hci_dev *hdev) { /* Do interleaved scan only if all of the following are true: * - There is at least one ADV monitor * - At least one pending LE connection or one device to be scanned for * - Monitor offloading is not supported * If so, we should alternate between allowlist scan and one without * any filters to save power. */ bool use_interleaving = hci_is_adv_monitoring(hdev) && !(list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE; bool is_interleaving = is_interleave_scanning(hdev); if (use_interleaving && !is_interleaving) { start_interleave_scan(hdev); bt_dev_dbg(hdev, "starting interleave scan"); return true; } if (!use_interleaving && is_interleaving) cancel_interleave_scan(hdev); return false; } void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) { struct hci_dev *hdev = req->hdev; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return; } if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp); } else { struct hci_cp_le_set_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } /* Disable address resolution */ if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } } static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_accept_list cp; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); if (use_ll_privacy(req->hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); if (irk) { struct hci_cp_le_del_from_resolv_list cp; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, sizeof(cp), &cp); } } } /* Adds connection to accept list if needed. On error, returns -1. 
*/ static int add_to_accept_list(struct hci_request *req, struct hci_conn_params *params, u8 *num_entries, bool allow_rpa) { struct hci_cp_le_add_to_accept_list cp; struct hci_dev *hdev = req->hdev; /* Already in accept list */ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, params->addr_type)) return 0; /* Select filter policy to accept all advertising */ if (*num_entries >= hdev->le_accept_list_size) return -1; /* Accept list can not be used with RPAs */ if (!allow_rpa && !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) { return -1; } /* During suspend, only wakeable devices can be in accept list */ if (hdev->suspended && !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) return 0; *num_entries += 1; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); if (use_ll_privacy(hdev)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); if (irk) { struct hci_cp_le_add_to_resolv_list cp; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); memcpy(cp.peer_irk, irk->val, 16); if (hci_dev_test_flag(hdev, HCI_PRIVACY)) memcpy(cp.local_irk, hdev->irk, 16); else memset(cp.local_irk, 0, 16); hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, sizeof(cp), &cp); } } return 0; } static u8 update_accept_list(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_conn_params *params; struct bdaddr_list *b; u8 num_entries = 0; bool pend_conn, pend_report; /* We allow usage of accept list even with RPAs in suspend. In the worst * case, we won't be able to wake from devices that use the privacy1.2 * features. Additionally, once we support privacy1.2 and IRK * offloading, we can update this to also check for those conditions. */ bool allow_rpa = hdev->suspended; if (use_ll_privacy(hdev)) allow_rpa = true; /* Go through the current accept list programmed into the * controller one by one and check if that address is still * in the list of pending connections or list of devices to * report. If not present in either list, then queue the * command to remove it from the controller. */ list_for_each_entry(b, &hdev->le_accept_list, list) { pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, &b->bdaddr, b->bdaddr_type); pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, &b->bdaddr, b->bdaddr_type); /* If the device is not likely to connect or report, * remove it from the accept list. */ if (!pend_conn && !pend_report) { del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); continue; } /* Accept list can not be used with RPAs */ if (!allow_rpa && !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { return 0x00; } num_entries++; } /* Since all no longer valid accept list entries have been * removed, walk through the list of pending connections * and ensure that any new device gets programmed into * the controller. * * If the list of the devices is larger than the list of * available accept list entries in the controller, then * just abort and return filer policy value to not use the * accept list. 
*/ list_for_each_entry(params, &hdev->pend_le_conns, action) { if (add_to_accept_list(req, params, &num_entries, allow_rpa)) return 0x00; } /* After adding all new pending connections, walk through * the list of pending reports and also add these to the * accept list if there is still space. Abort if space runs out. */ list_for_each_entry(params, &hdev->pend_le_reports, action) { if (add_to_accept_list(req, params, &num_entries, allow_rpa)) return 0x00; } /* Use the allowlist unless the following conditions are all true: * - We are not currently suspending * - There are 1 or more ADV monitors registered and it's not offloaded * - Interleaved scanning is not currently using the allowlist */ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) return 0x00; /* Select filter policy to use accept list */ return 0x01; } static bool scan_use_rpa(struct hci_dev *hdev) { return hci_dev_test_flag(hdev, HCI_PRIVACY); } static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, u16 window, u8 own_addr_type, u8 filter_policy, bool filter_dup, bool addr_resolv) { struct hci_dev *hdev = req->hdev; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return; } if (use_ll_privacy(hdev) && addr_resolv) { u8 enable = 0x01; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } /* Use ext scanning if set ext scan param and ext scan enable is * supported */ if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_params *ext_param_cp; struct hci_cp_le_set_ext_scan_enable ext_enable_cp; struct hci_cp_le_scan_phy_params *phy_params; u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; u32 plen; ext_param_cp = (void *)data; phy_params = (void *)ext_param_cp->data; memset(ext_param_cp, 0, sizeof(*ext_param_cp)); ext_param_cp->own_addr_type = own_addr_type; ext_param_cp->filter_policy = filter_policy; plen = sizeof(*ext_param_cp); if (scan_1m(hdev) || scan_2m(hdev)) { ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; memset(phy_params, 0, sizeof(*phy_params)); phy_params->type = type; phy_params->interval = cpu_to_le16(interval); phy_params->window = cpu_to_le16(window); plen += sizeof(*phy_params); phy_params++; } if (scan_coded(hdev)) { ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; memset(phy_params, 0, sizeof(*phy_params)); phy_params->type = type; phy_params->interval = cpu_to_le16(interval); phy_params->window = cpu_to_le16(window); plen += sizeof(*phy_params); phy_params++; } hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, plen, ext_param_cp); memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); ext_enable_cp.enable = LE_SCAN_ENABLE; ext_enable_cp.filter_dup = filter_dup; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(ext_enable_cp), &ext_enable_cp); } else { struct hci_cp_le_set_scan_param param_cp; struct hci_cp_le_set_scan_enable enable_cp; memset(&param_cp, 0, sizeof(param_cp)); param_cp.type = type; param_cp.interval = cpu_to_le16(interval); param_cp.window = cpu_to_le16(window); param_cp.own_address_type = own_addr_type; param_cp.filter_policy = filter_policy; hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), &param_cp); memset(&enable_cp, 0, sizeof(enable_cp)); enable_cp.enable = LE_SCAN_ENABLE; enable_cp.filter_dup = filter_dup; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), &enable_cp); } } static void set_random_addr(struct hci_request *req, bdaddr_t *rpa); static int 
hci_update_random_address(struct hci_request *req, bool require_privacy, bool use_rpa, u8 *own_addr_type) { struct hci_dev *hdev = req->hdev; int err; /* If privacy is enabled use a resolvable private address. If * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ if (use_rpa) { /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (rpa_valid(hdev)) return 0; err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } set_random_addr(req, &hdev->rpa); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for active * scanning and non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; set_random_addr(req, &nrpa); return 0; } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. */ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->static_addr); return 0; } /* Neither privacy nor static address is being used so use a * public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } /* Ensure to call hci_req_add_le_scan_disable() first to disable the * controller based address resolution to be able to reconfigure * resolving list. */ void hci_req_add_le_passive_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; u16 window, interval; /* Default is to enable duplicates filter */ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; /* Background scanning should run with address resolution */ bool addr_resolv = true; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return; } /* Set require_privacy to false since no SCAN_REQ are send * during passive scanning. Not using an non-resolvable address * here is important so that peer devices using direct * advertising with our address will be correctly reported * by the controller. */ if (hci_update_random_address(req, false, scan_use_rpa(hdev), &own_addr_type)) return; if (hdev->enable_advmon_interleave_scan && __hci_update_interleaved_scan(hdev)) return; bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the accept list must * happen before enabling scanning. The controller does * not allow accept list modification while scanning. 
*/ filter_policy = update_accept_list(req); /* When the controller is using random resolvable addresses and * with that having LE privacy enabled, then controllers with * Extended Scanner Filter Policies support can now enable support * for handling directed advertising. * * So instead of using filter polices 0x00 (no accept list) * and 0x01 (accept list enabled) use the new filter policies * 0x02 (no accept list) and 0x03 (accept list enabled). */ if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; /* Disable duplicates filter when scanning for advertisement * monitor for the following reasons. * * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm * controllers ignore RSSI_Sampling_Period when the duplicates * filter is enabled. * * For SW pattern filtering, when we're not doing interleaved * scanning, it is necessary to disable duplicates filter, * otherwise hosts can only receive one advertisement and it's * impossible to know if a peer is still in range. */ filter_dup = LE_SCAN_FILTER_DUP_DISABLE; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } bt_dev_dbg(hdev, "LE passive scan with accept list = %d", filter_policy); hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, own_addr_type, filter_policy, filter_dup, addr_resolv); } static int hci_req_add_le_interleaved_scan(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; int ret = 0; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req, false); hci_req_add_le_passive_scan(req); switch (hdev->interleave_scan_state) { case INTERLEAVE_SCAN_ALLOWLIST: bt_dev_dbg(hdev, "next state: allowlist"); hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; break; case INTERLEAVE_SCAN_NO_FILTER: bt_dev_dbg(hdev, "next state: no filter"); hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; break; case INTERLEAVE_SCAN_NONE: BT_ERR("unexpected error"); ret = -1; } hci_dev_unlock(hdev); return ret; } static void interleave_scan_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, interleave_scan.work); u8 status; unsigned long timeout; if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); } else { bt_dev_err(hdev, "unexpected error"); return; } hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, HCI_CMD_TIMEOUT, &status); /* Don't continue interleaving if it was canceled */ if (is_interleave_scanning(hdev)) queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, timeout); } static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) { struct hci_dev *hdev = req->hdev; /* If we're advertising or initiating an LE connection we can't * go ahead and change the random address at this time. 
This is * because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). * * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev)) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; } hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); } void hci_request_setup(struct hci_dev *hdev) { INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); } void hci_request_cancel_all(struct hci_dev *hdev) { __hci_cmd_sync_cancel(hdev, ENODEV); cancel_interleave_scan(hdev); }
linux-master
net/bluetooth/hci_request.c
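/* Editor's note: hci_update_random_address() above builds a non-resolvable
 * private address (NRPA) from six random bytes, clears the two most
 * significant bits of the top byte (nrpa.b[5] &= 0x3f) and retries if the
 * result equals the controller's public address. The stand-alone sketch below
 * mirrors only that logic; rand() is used purely for illustration, a real
 * implementation would use a cryptographically secure source.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

struct bdaddr { uint8_t b[6]; };	/* little-endian byte order, as in the kernel */

static void make_nrpa(struct bdaddr *nrpa, const struct bdaddr *public_addr)
{
	do {
		for (int i = 0; i < 6; i++)
			nrpa->b[i] = (uint8_t)rand();
		nrpa->b[5] &= 0x3f;	/* NRPA: two most significant bits must be zero */
	} while (!memcmp(nrpa, public_addr, sizeof(*nrpa)));	/* must differ from public address */
}

int main(void)
{
	struct bdaddr pub = { { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
	struct bdaddr nrpa;

	srand((unsigned int)time(NULL));
	make_nrpa(&nrpa, &pub);
	printf("NRPA: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       nrpa.b[5], nrpa.b[4], nrpa.b[3],
	       nrpa.b[2], nrpa.b[1], nrpa.b[0]);
	return 0;
}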
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan <[email protected]> Copyright (C) 2010 Google Inc. Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP sockets. */ #include <linux/module.h> #include <linux/export.h> #include <linux/filter.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "smp.h" static struct bt_sock_list l2cap_sk_list = { .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) }; static const struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { return sock && sock->ops == &l2cap_sock_ops; } EXPORT_SYMBOL(l2cap_is_socket); static int l2cap_validate_bredr_psm(u16 psm) { /* PSM must be odd and lsb of upper byte must be 0 */ if ((psm & 0x0101) != 0x0001) return -EINVAL; /* Restrict usage of well-known PSMs */ if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; return 0; } static int l2cap_validate_le_psm(u16 psm) { /* Valid LE_PSM ranges are defined only until 0x00ff */ if (psm > L2CAP_PSM_LE_DYN_END) return -EINVAL; /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */ if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; return 0; } static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid && la.l2_psm) return -EINVAL; if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) return -EINVAL; if (bdaddr_type_is_le(la.l2_bdaddr_type)) { /* We only allow ATT user space socket */ if (la.l2_cid && la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) return -EINVAL; } lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (la.l2_psm) { __u16 psm = __le16_to_cpu(la.l2_psm); if (la.l2_bdaddr_type == BDADDR_BREDR) err = l2cap_validate_bredr_psm(psm); else err = l2cap_validate_le_psm(psm); if (err) goto done; } bacpy(&chan->src, 
&la.l2_bdaddr); chan->src_type = la.l2_bdaddr_type; if (la.l2_cid) err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid)); else err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); if (err < 0) goto done; switch (chan->chan_type) { case L2CAP_CHAN_CONN_LESS: if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP) chan->sec_level = BT_SECURITY_SDP; break; case L2CAP_CHAN_CONN_ORIENTED: if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) chan->sec_level = BT_SECURITY_SDP; break; case L2CAP_CHAN_RAW: chan->sec_level = BT_SECURITY_SDP; break; case L2CAP_CHAN_FIXED: /* Fixed channels default to the L2CAP core not holding a * hci_conn reference for them. For fixed channels mapping to * L2CAP sockets we do want to hold a reference so set the * appropriate flag to request it. */ set_bit(FLAG_HOLD_HCI_CONN, &chan->flags); break; } /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. */ if (chan->psm && bdaddr_type_is_le(chan->src_type) && chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->state = BT_BOUND; sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct sockaddr_l2 la; int len, err = 0; bool zapped; BT_DBG("sk %p", sk); lock_sock(sk); zapped = sock_flag(sk, SOCK_ZAPPED); release_sock(sk); if (zapped) return -EINVAL; if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid && la.l2_psm) return -EINVAL; if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) return -EINVAL; /* Check that the socket wasn't bound to something that * conflicts with the address given to connect(). If chan->src * is BDADDR_ANY it means bind() was never used, in which case * chan->src_type and la.l2_bdaddr_type do not need to match. */ if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) && bdaddr_type_is_le(la.l2_bdaddr_type)) { /* Old user space versions will try to incorrectly bind * the ATT socket using BDADDR_BREDR. We need to accept * this and fix up the source address type only when * both the source CID and destination CID indicate * ATT. Anything else is an invalid combination. */ if (chan->scid != L2CAP_CID_ATT || la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) return -EINVAL; /* We don't have the hdev available here to make a * better decision on random vs public, but since all * user space versions that exhibit this issue anyway do * not support random local addresses assuming public * here is good enough. */ chan->src_type = BDADDR_LE_PUBLIC; } if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR) return -EINVAL; if (bdaddr_type_is_le(la.l2_bdaddr_type)) { /* We only allow ATT user space socket */ if (la.l2_cid && la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) return -EINVAL; } /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. 
*/ if (chan->psm && bdaddr_type_is_le(chan->src_type) && chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), &la.l2_bdaddr, la.l2_bdaddr_type); if (err) return err; lock_sock(sk); err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); release_sock(sk); return err; } static int l2cap_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } switch (chan->mode) { case L2CAP_MODE_BASIC: case L2CAP_MODE_LE_FLOWCTL: break; case L2CAP_MODE_EXT_FLOWCTL: if (!enable_ecred) { err = -EOPNOTSUPP; goto done; } break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; fallthrough; default: err = -EOPNOTSUPP; goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; /* Listening channels need to use nested locking in order not to * cause lockdep warnings when the created child channels end up * being locked in the same thread as the parent channel. */ atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); chan->state = BT_LISTEN; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, L2CAP_NESTING_PARENT); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } nsk = bt_accept_dequeue(sk, newsock); if (nsk) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock_nested(sk, L2CAP_NESTING_PARENT); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; BT_DBG("sock %p, sk %p", sock, sk); if (peer && sk->sk_state != BT_CONNECTED && sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 && sk->sk_state != BT_CONFIG) return -ENOTCONN; memset(la, 0, sizeof(struct sockaddr_l2)); addr->sa_family = AF_BLUETOOTH; la->l2_psm = chan->psm; if (peer) { bacpy(&la->l2_bdaddr, &chan->dst); la->l2_cid = cpu_to_le16(chan->dcid); la->l2_bdaddr_type = chan->dst_type; } else { bacpy(&la->l2_bdaddr, &chan->src); la->l2_cid = cpu_to_le16(chan->scid); la->l2_bdaddr_type = chan->src_type; } return sizeof(struct sockaddr_l2); } static int l2cap_get_mode(struct l2cap_chan *chan) { switch (chan->mode) { case L2CAP_MODE_BASIC: return BT_MODE_BASIC; case L2CAP_MODE_ERTM: return BT_MODE_ERTM; case L2CAP_MODE_STREAMING: return BT_MODE_STREAMING; case L2CAP_MODE_LE_FLOWCTL: return BT_MODE_LE_FLOWCTL; case L2CAP_MODE_EXT_FLOWCTL: return BT_MODE_EXT_FLOWCTL; } return -EINVAL; } static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct l2cap_options opts; struct l2cap_conninfo cinfo; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case L2CAP_OPTIONS: /* LE sockets should use BT_SNDMTU/BT_RCVMTU, but since * legacy ATT code depends on getsockopt for * L2CAP_OPTIONS we need to let this pass. 
*/ if (bdaddr_type_is_le(chan->src_type) && chan->scid != L2CAP_CID_ATT) { err = -EINVAL; break; } /* Only BR/EDR modes are supported here */ switch (chan->mode) { case L2CAP_MODE_BASIC: case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: break; default: err = -EINVAL; break; } if (err < 0) break; memset(&opts, 0, sizeof(opts)); opts.imtu = chan->imtu; opts.omtu = chan->omtu; opts.flush_to = chan->flush_to; opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; opts.txwin_size = chan->tx_win; BT_DBG("mode 0x%2.2x", chan->mode); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) err = -EFAULT; break; case L2CAP_LM: switch (chan->sec_level) { case BT_SECURITY_LOW: opt = L2CAP_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT; break; case BT_SECURITY_HIGH: opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE; break; case BT_SECURITY_FIPS: opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE | L2CAP_LM_FIPS; break; default: opt = 0; break; } if (test_bit(FLAG_ROLE_SWITCH, &chan->flags)) opt |= L2CAP_LM_MASTER; if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) opt |= L2CAP_LM_RELIABLE; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case L2CAP_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = chan->conn->hcon->handle; memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct bt_security sec; struct bt_power pwr; u32 phys; int len, mode, err = 0; BT_DBG("sk %p", sk); if (level == SOL_L2CAP) return l2cap_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && chan->chan_type != L2CAP_CHAN_FIXED && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; break; } memset(&sec, 0, sizeof(sec)); if (chan->conn) { sec.level = chan->conn->hcon->sec_level; if (sk->sk_state == BT_CONNECTED) sec.key_size = chan->conn->hcon->enc_key_size; } else { sec.level = chan->sec_level; } len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *) optval)) err = -EFAULT; break; case BT_FLUSHABLE: if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), (u32 __user *) optval)) err = -EFAULT; break; case BT_POWER: if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_RAW) { err = -EINVAL; break; } pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); len = min_t(unsigned int, len, sizeof(pwr)); if (copy_to_user(optval, (char *) &pwr, len)) err = -EFAULT; break; case BT_CHANNEL_POLICY: if (put_user(chan->chan_policy, (u32 __user *) optval)) err = -EFAULT; break; case BT_SNDMTU: if (!bdaddr_type_is_le(chan->src_type)) { err = 
-EINVAL; break; } if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } if (put_user(chan->omtu, (u16 __user *) optval)) err = -EFAULT; break; case BT_RCVMTU: if (!bdaddr_type_is_le(chan->src_type)) { err = -EINVAL; break; } if (put_user(chan->imtu, (u16 __user *) optval)) err = -EFAULT; break; case BT_PHY: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } phys = hci_conn_get_phy(chan->conn->hcon); if (put_user(phys, (u32 __user *) optval)) err = -EFAULT; break; case BT_MODE: if (!enable_ecred) { err = -ENOPROTOOPT; break; } if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { err = -EINVAL; break; } mode = l2cap_get_mode(chan); if (mode < 0) { err = mode; break; } if (put_user(mode, (u8 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) { switch (chan->scid) { case L2CAP_CID_ATT: if (mtu < L2CAP_LE_MIN_MTU) return false; break; default: if (mtu < L2CAP_DEFAULT_MIN_MTU) return false; } return true; } static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct l2cap_options opts; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case L2CAP_OPTIONS: if (bdaddr_type_is_le(chan->src_type)) { err = -EINVAL; break; } if (sk->sk_state == BT_CONNECTED) { err = -EINVAL; break; } opts.imtu = chan->imtu; opts.omtu = chan->omtu; opts.flush_to = chan->flush_to; opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; opts.txwin_size = chan->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); if (copy_from_sockptr(&opts, optval, len)) { err = -EFAULT; break; } if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { err = -EINVAL; break; } if (!l2cap_valid_mtu(chan, opts.imtu)) { err = -EINVAL; break; } /* Only BR/EDR modes are supported here */ switch (opts.mode) { case L2CAP_MODE_BASIC: clear_bit(CONF_STATE2_DEVICE, &chan->conf_state); break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; fallthrough; default: err = -EINVAL; break; } if (err < 0) break; chan->mode = opts.mode; BT_DBG("mode 0x%2.2x", chan->mode); chan->imtu = opts.imtu; chan->omtu = opts.omtu; chan->fcs = opts.fcs; chan->max_tx = opts.max_tx; chan->tx_win = opts.txwin_size; chan->flush_to = opts.flush_to; break; case L2CAP_LM: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt & L2CAP_LM_FIPS) { err = -EINVAL; break; } if (opt & L2CAP_LM_AUTH) chan->sec_level = BT_SECURITY_LOW; if (opt & L2CAP_LM_ENCRYPT) chan->sec_level = BT_SECURITY_MEDIUM; if (opt & L2CAP_LM_SECURE) chan->sec_level = BT_SECURITY_HIGH; if (opt & L2CAP_LM_MASTER) set_bit(FLAG_ROLE_SWITCH, &chan->flags); else clear_bit(FLAG_ROLE_SWITCH, &chan->flags); if (opt & L2CAP_LM_RELIABLE) set_bit(FLAG_FORCE_RELIABLE, &chan->flags); else clear_bit(FLAG_FORCE_RELIABLE, &chan->flags); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_set_mode(struct l2cap_chan *chan, u8 mode) { switch (mode) { case BT_MODE_BASIC: if (bdaddr_type_is_le(chan->src_type)) return -EINVAL; mode = L2CAP_MODE_BASIC; clear_bit(CONF_STATE2_DEVICE, &chan->conf_state); break; case BT_MODE_ERTM: if (!disable_ertm || bdaddr_type_is_le(chan->src_type)) return -EINVAL; mode = L2CAP_MODE_ERTM; break; case BT_MODE_STREAMING: if (!disable_ertm || bdaddr_type_is_le(chan->src_type)) return -EINVAL; 
mode = L2CAP_MODE_STREAMING; break; case BT_MODE_LE_FLOWCTL: if (!bdaddr_type_is_le(chan->src_type)) return -EINVAL; mode = L2CAP_MODE_LE_FLOWCTL; break; case BT_MODE_EXT_FLOWCTL: /* TODO: Add support for ECRED PDUs to BR/EDR */ if (!bdaddr_type_is_le(chan->src_type)) return -EINVAL; mode = L2CAP_MODE_EXT_FLOWCTL; break; default: return -EINVAL; } chan->mode = mode; return 0; } static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; struct bt_security sec; struct bt_power pwr; struct l2cap_conn *conn; int len, err = 0; u32 opt; u16 mtu; u8 mode; BT_DBG("sk %p", sk); if (level == SOL_L2CAP) return l2cap_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && chan->chan_type != L2CAP_CHAN_FIXED && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; break; } sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } if (sec.level < BT_SECURITY_LOW || sec.level > BT_SECURITY_FIPS) { err = -EINVAL; break; } chan->sec_level = sec.level; if (!chan->conn) break; conn = chan->conn; /* change security for LE channels */ if (chan->scid == L2CAP_CID_ATT) { if (smp_conn_security(conn->hcon, sec.level)) { err = -EINVAL; break; } set_bit(FLAG_PENDING_SECURITY, &chan->flags); sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; /* or for ACL link */ } else if ((sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || sk->sk_state == BT_CONNECTED) { if (!l2cap_chan_check_security(chan, true)) set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); else sk->sk_state_change(sk); } else { err = -EINVAL; } break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) { set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); set_bit(FLAG_DEFER_SETUP, &chan->flags); } else { clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); clear_bit(FLAG_DEFER_SETUP, &chan->flags); } break; case BT_FLUSHABLE: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt > BT_FLUSHABLE_ON) { err = -EINVAL; break; } if (opt == BT_FLUSHABLE_OFF) { conn = chan->conn; /* proceed further only when we have l2cap_conn and No Flush support in the LM */ if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { err = -EINVAL; break; } } if (opt) set_bit(FLAG_FLUSHABLE, &chan->flags); else clear_bit(FLAG_FLUSHABLE, &chan->flags); break; case BT_POWER: if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; break; } pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; len = min_t(unsigned int, sizeof(pwr), optlen); if (copy_from_sockptr(&pwr, optval, len)) { err = -EFAULT; break; } if (pwr.force_active) set_bit(FLAG_FORCE_ACTIVE, &chan->flags); else clear_bit(FLAG_FORCE_ACTIVE, &chan->flags); break; case BT_CHANNEL_POLICY: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { err = -EINVAL; break; } if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) { err = -EOPNOTSUPP; break; } chan->chan_policy = (u8) opt; if (sk->sk_state == BT_CONNECTED && chan->move_role == L2CAP_MOVE_ROLE_NONE) 
l2cap_move_start(chan); break; case BT_SNDMTU: if (!bdaddr_type_is_le(chan->src_type)) { err = -EINVAL; break; } /* Setting is not supported as it's the remote side that * decides this. */ err = -EPERM; break; case BT_RCVMTU: if (!bdaddr_type_is_le(chan->src_type)) { err = -EINVAL; break; } if (chan->mode == L2CAP_MODE_LE_FLOWCTL && sk->sk_state == BT_CONNECTED) { err = -EISCONN; break; } if (copy_from_sockptr(&mtu, optval, sizeof(u16))) { err = -EFAULT; break; } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && sk->sk_state == BT_CONNECTED) err = l2cap_chan_reconfigure(chan, mtu); else chan->imtu = mtu; break; case BT_MODE: if (!enable_ecred) { err = -ENOPROTOOPT; break; } BT_DBG("sk->sk_state %u", sk->sk_state); if (sk->sk_state != BT_BOUND) { err = -EINVAL; break; } if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { err = -EINVAL; break; } if (copy_from_sockptr(&mode, optval, sizeof(u8))) { err = -EFAULT; break; } BT_DBG("mode %u", mode); err = l2cap_set_mode(chan, mode); if (err) break; BT_DBG("mode 0x%2.2x", chan->mode); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_state != BT_CONNECTED) return -ENOTCONN; lock_sock(sk); err = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); if (err) return err; l2cap_chan_lock(chan); err = l2cap_chan_send(chan, msg, len); l2cap_chan_unlock(chan); return err; } static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct l2cap_pinfo *pi = l2cap_pi(sk); int err; lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { if (pi->chan->mode == L2CAP_MODE_EXT_FLOWCTL) { sk->sk_state = BT_CONNECTED; pi->chan->state = BT_CONNECTED; __l2cap_ecred_conn_rsp_defer(pi->chan); } else if (bdaddr_type_is_le(pi->chan->src_type)) { sk->sk_state = BT_CONNECTED; pi->chan->state = BT_CONNECTED; __l2cap_le_connect_rsp_defer(pi->chan); } else { sk->sk_state = BT_CONFIG; pi->chan->state = BT_CONFIG; __l2cap_connect_rsp_defer(pi->chan); } err = 0; goto done; } release_sock(sk); if (sock->type == SOCK_STREAM) err = bt_sock_stream_recvmsg(sock, msg, len, flags); else err = bt_sock_recvmsg(sock, msg, len, flags); if (pi->chan->mode != L2CAP_MODE_ERTM) return err; /* Attempt to put pending rx data in the socket buffer */ lock_sock(sk); if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state)) goto done; if (pi->rx_busy_skb) { if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb)) pi->rx_busy_skb = NULL; else goto done; } /* Restore data flow when half of the receive buffer is * available. This avoids resending large numbers of * frames. */ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1) l2cap_chan_busy(pi->chan, 0); done: release_sock(sk); return err; } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket, with l2cap channel lock. 
*/ static void l2cap_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state)); /* Kill poor orphan */ l2cap_chan_put(l2cap_pi(sk)->chan); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan) { DECLARE_WAITQUEUE(wait, current); int err = 0; int timeo = L2CAP_WAIT_ACK_POLL_PERIOD; /* Timeout to prevent infinite loop */ unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT; add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); do { BT_DBG("Waiting for %d ACKs, timeout %04d ms", chan->unacked_frames, time_after(jiffies, timeout) ? 0 : jiffies_to_msecs(timeout - jiffies)); if (!timeo) timeo = L2CAP_WAIT_ACK_POLL_PERIOD; if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; if (time_after(jiffies, timeout)) { err = -ENOLINK; break; } } while (chan->unacked_frames > 0 && chan->state == BT_CONNECTED); set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } static int l2cap_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct l2cap_chan *chan; struct l2cap_conn *conn; int err = 0; BT_DBG("sock %p, sk %p, how %d", sock, sk, how); /* 'how' parameter is mapped to sk_shutdown as follows: * SHUT_RD (0) --> RCV_SHUTDOWN (1) * SHUT_WR (1) --> SEND_SHUTDOWN (2) * SHUT_RDWR (2) --> SHUTDOWN_MASK (3) */ how++; if (!sk) return 0; lock_sock(sk); if ((sk->sk_shutdown & how) == how) goto shutdown_already; BT_DBG("Handling sock shutdown"); /* prevent sk structure from being freed whilst unlocked */ sock_hold(sk); chan = l2cap_pi(sk)->chan; /* prevent chan structure from being freed whilst unlocked */ l2cap_chan_hold(chan); BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); if (chan->mode == L2CAP_MODE_ERTM && chan->unacked_frames > 0 && chan->state == BT_CONNECTED) { err = __l2cap_wait_ack(sk, chan); /* After waiting for ACKs, check whether shutdown * has already been actioned to close the L2CAP * link such as by l2cap_disconnection_req(). 
*/ if ((sk->sk_shutdown & how) == how) goto shutdown_matched; } /* Try setting the RCV_SHUTDOWN bit, return early if SEND_SHUTDOWN * is already set */ if ((how & RCV_SHUTDOWN) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { sk->sk_shutdown |= RCV_SHUTDOWN; if ((sk->sk_shutdown & how) == how) goto shutdown_matched; } sk->sk_shutdown |= SEND_SHUTDOWN; release_sock(sk); l2cap_chan_lock(chan); conn = chan->conn; if (conn) /* prevent conn structure from being freed */ l2cap_conn_get(conn); l2cap_chan_unlock(chan); if (conn) /* mutex lock must be taken before l2cap_chan_lock() */ mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); l2cap_chan_close(chan, 0); l2cap_chan_unlock(chan); if (conn) { mutex_unlock(&conn->chan_lock); l2cap_conn_put(conn); } lock_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); shutdown_matched: l2cap_chan_put(chan); sock_put(sk); shutdown_already: if (!err && sk->sk_err) err = -sk->sk_err; release_sock(sk); BT_DBG("Sock shutdown complete err: %d", err); return err; } static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; struct l2cap_chan *chan; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); chan = l2cap_pi(sk)->chan; l2cap_chan_hold(chan); l2cap_chan_lock(chan); sock_orphan(sk); l2cap_sock_kill(sk); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return err; } static void l2cap_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p state %s", parent, state_to_string(parent->sk_state)); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; BT_DBG("child chan %p state %s", chan, state_to_string(chan->state)); l2cap_chan_hold(chan); l2cap_chan_lock(chan); __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); l2cap_sock_kill(sk); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } } static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) { struct sock *sk, *parent = chan->data; lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { BT_DBG("backlog full %d", parent->sk_ack_backlog); release_sock(parent); return NULL; } sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); return NULL; } bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); l2cap_sock_init(sk, parent); bt_accept_enqueue(parent, sk, false); release_sock(parent); return l2cap_pi(sk)->chan; } static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { struct sock *sk = chan->data; int err; lock_sock(sk); if (l2cap_pi(sk)->rx_busy_skb) { err = -ENOMEM; goto done; } if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) { /* Even if no filter is attached, we could potentially * get errors from security modules, etc. */ err = sk_filter(sk, skb); if (err) goto done; } err = __sock_queue_rcv_skb(sk, skb); /* For ERTM, handle one skb that doesn't fit into the recv * buffer. This is important to do because the data frames * have already been acked, so the skb cannot be discarded. * * Notify the l2cap core that the buffer is full, so the * LOCAL_BUSY state is entered and no more frames are * acked and reassembled until there is buffer space * available. 
*/ if (err < 0 && chan->mode == L2CAP_MODE_ERTM) { l2cap_pi(sk)->rx_busy_skb = skb; l2cap_chan_busy(chan, 1); err = 0; } done: release_sock(sk); return err; } static void l2cap_sock_close_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; if (!sk) return; l2cap_sock_kill(sk); } static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) { struct sock *sk = chan->data; struct sock *parent; if (!sk) return; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); /* This callback can be called both for server (BT_LISTEN) * sockets as well as "normal" ones. To avoid lockdep warnings * with child socket locking (through l2cap_sock_cleanup_listen) * we need separation into separate nesting levels. The simplest * way to accomplish this is to inherit the nesting level used * for the channel. */ lock_sock_nested(sk, atomic_read(&chan->nesting)); parent = bt_sk(sk)->parent; switch (chan->state) { case BT_OPEN: case BT_BOUND: case BT_CLOSED: break; case BT_LISTEN: l2cap_sock_cleanup_listen(sk); sk->sk_state = BT_CLOSED; chan->state = BT_CLOSED; break; default: sk->sk_state = BT_CLOSED; chan->state = BT_CLOSED; sk->sk_err = err; if (parent) { bt_accept_unlink(sk); parent->sk_data_ready(parent); } else { sk->sk_state_change(sk); } break; } release_sock(sk); /* Only zap after cleanup to avoid use after free race */ sock_set_flag(sk, SOCK_ZAPPED); } static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, int err) { struct sock *sk = chan->data; sk->sk_state = state; if (err) sk->sk_err = err; } static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { struct sock *sk = chan->data; struct sk_buff *skb; int err; l2cap_chan_unlock(chan); skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err); l2cap_chan_lock(chan); if (!skb) return ERR_PTR(err); /* Channel lock is released before requesting new skb and then * reacquired thus we need to recheck channel state. 
*/ if (chan->state != BT_CONNECTED) { kfree_skb(skb); return ERR_PTR(-ENOTCONN); } skb->priority = sk->sk_priority; bt_cb(skb)->l2cap.chan = chan; return skb; } static void l2cap_sock_ready_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; struct sock *parent; lock_sock(sk); parent = bt_sk(sk)->parent; BT_DBG("sk %p, parent %p", sk, parent); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); if (parent) parent->sk_data_ready(parent); release_sock(sk); } static void l2cap_sock_defer_cb(struct l2cap_chan *chan) { struct sock *parent, *sk = chan->data; lock_sock(sk); parent = bt_sk(sk)->parent; if (parent) parent->sk_data_ready(parent); release_sock(sk); } static void l2cap_sock_resume_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) { sk->sk_state = BT_CONNECTED; chan->state = BT_CONNECTED; } clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); sk->sk_state_change(sk); } static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; release_sock(sk); } static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; return sk->sk_sndtimeo; } static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; return sk->sk_peer_pid; } static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); sk->sk_state_change(sk); } static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) { struct sock *sk = chan->data; switch (chan->mode) { case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: return sk_filter(sk, skb); } return 0; } static const struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, .recv = l2cap_sock_recv_cb, .close = l2cap_sock_close_cb, .teardown = l2cap_sock_teardown_cb, .state_change = l2cap_sock_state_change_cb, .ready = l2cap_sock_ready_cb, .defer = l2cap_sock_defer_cb, .resume = l2cap_sock_resume_cb, .suspend = l2cap_sock_suspend_cb, .set_shutdown = l2cap_sock_set_shutdown_cb, .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, .get_peer_pid = l2cap_sock_get_peer_pid_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, .filter = l2cap_sock_filter, }; static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); if (l2cap_pi(sk)->chan) { l2cap_pi(sk)->chan->data = NULL; l2cap_chan_put(l2cap_pi(sk)->chan); } if (l2cap_pi(sk)->rx_busy_skb) { kfree_skb(l2cap_pi(sk)->rx_busy_skb); l2cap_pi(sk)->rx_busy_skb = NULL; } skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name, int *msg_namelen) { DECLARE_SOCKADDR(struct sockaddr_l2 *, la, msg_name); memset(la, 0, sizeof(struct sockaddr_l2)); la->l2_family = AF_BLUETOOTH; la->l2_psm = bt_cb(skb)->l2cap.psm; bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr); *msg_namelen = sizeof(struct sockaddr_l2); } static void l2cap_sock_init(struct sock *sk, struct sock *parent) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; BT_DBG("sk %p", sk); if (parent) { struct l2cap_chan *pchan = l2cap_pi(parent)->chan; sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; chan->chan_type = pchan->chan_type; chan->imtu = pchan->imtu; chan->omtu = pchan->omtu; chan->conf_state = pchan->conf_state; chan->mode = pchan->mode; chan->fcs = pchan->fcs; chan->max_tx = pchan->max_tx; chan->tx_win = 
pchan->tx_win; chan->tx_win_max = pchan->tx_win_max; chan->sec_level = pchan->sec_level; chan->flags = pchan->flags; chan->tx_credits = pchan->tx_credits; chan->rx_credits = pchan->rx_credits; if (chan->chan_type == L2CAP_CHAN_FIXED) { chan->scid = pchan->scid; chan->dcid = pchan->scid; } security_sk_clone(parent, sk); } else { switch (sk->sk_type) { case SOCK_RAW: chan->chan_type = L2CAP_CHAN_RAW; break; case SOCK_DGRAM: chan->chan_type = L2CAP_CHAN_CONN_LESS; bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name; break; case SOCK_SEQPACKET: case SOCK_STREAM: chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; break; } chan->imtu = L2CAP_DEFAULT_MTU; chan->omtu = 0; if (!disable_ertm && sk->sk_type == SOCK_STREAM) { chan->mode = L2CAP_MODE_ERTM; set_bit(CONF_STATE2_DEVICE, &chan->conf_state); } else { chan->mode = L2CAP_MODE_BASIC; } l2cap_chan_set_defaults(chan); } /* Default config options */ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; chan->data = sk; chan->ops = &l2cap_chan_ops; } static struct proto l2cap_proto = { .name = "L2CAP", .owner = THIS_MODULE, .obj_size = sizeof(struct l2cap_pinfo) }; static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; struct l2cap_chan *chan; sk = bt_sock_alloc(net, sock, &l2cap_proto, proto, prio, kern); if (!sk) return NULL; sk->sk_destruct = l2cap_sock_destruct; sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; chan = l2cap_chan_create(); if (!chan) { sk_free(sk); return NULL; } l2cap_chan_hold(chan); l2cap_pi(sk)->chan = chan; return sk; } static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) return -EPERM; sock->ops = &l2cap_sock_ops; sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; l2cap_sock_init(sk, NULL); bt_sock_link(&l2cap_sk_list, sk); return 0; } static const struct proto_ops l2cap_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = l2cap_sock_release, .bind = l2cap_sock_bind, .connect = l2cap_sock_connect, .listen = l2cap_sock_listen, .accept = l2cap_sock_accept, .getname = l2cap_sock_getname, .sendmsg = l2cap_sock_sendmsg, .recvmsg = l2cap_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .gettstamp = sock_gettstamp, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = l2cap_sock_shutdown, .setsockopt = l2cap_sock_setsockopt, .getsockopt = l2cap_sock_getsockopt }; static const struct net_proto_family l2cap_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = l2cap_sock_create, }; int __init l2cap_init_sockets(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr)); err = proto_register(&l2cap_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); if (err < 0) { BT_ERR("L2CAP socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create L2CAP proc file"); bt_sock_unregister(BTPROTO_L2CAP); goto error; } BT_INFO("L2CAP socket layer initialized"); return 0; error: proto_unregister(&l2cap_proto); return err; } void l2cap_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "l2cap"); bt_sock_unregister(BTPROTO_L2CAP); 
	proto_unregister(&l2cap_proto);
}
linux-master
net/bluetooth/l2cap_sock.c
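The file above (net/bluetooth/l2cap_sock.c) is the kernel side of the L2CAP socket API: l2cap_sock_create() installs l2cap_sock_ops, l2cap_sock_bind() validates the PSM, and l2cap_sock_connect() drives the channel to BT_CONNECTED. Below is a minimal user-space sketch of that path; it is not part of the kernel tree, the PSM 0x1001 and the peer address 00:11:22:33:44:55 are placeholders, and it assumes the BlueZ headers <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> plus linking with -lbluetooth for str2ba().

/* Illustrative user-space client for the bind/connect paths implemented
 * above. PSM and peer address are placeholders; build with -lbluetooth.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
	struct sockaddr_l2 addr;
	int sk;

	/* SOCK_SEQPACKET + BTPROTO_L2CAP ends up in l2cap_sock_create() */
	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	/* Bind to any local adapter; for BR/EDR the PSM must be odd with the
	 * lsb of the upper byte clear (see l2cap_validate_bredr_psm() above).
	 */
	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(0x1001);		/* placeholder dynamic PSM */
	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(sk);
		return 1;
	}

	/* Connect to a (placeholder) remote device on the same PSM; this
	 * drives l2cap_sock_connect() and then waits for BT_CONNECTED.
	 */
	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm = htobs(0x1001);
	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(sk);
		return 1;
	}

	write(sk, "hello", 5);
	close(sk);
	return 0;
}

Because the placeholder PSM lies in the dynamic range, the bind succeeds without CAP_NET_BIND_SERVICE; a well-known PSM such as SDP (0x0001) would require that capability per l2cap_validate_bredr_psm().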
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/debugfs.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "ecdh_helper.h" #include "smp.h" #include "selftest.h" #if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH) static const u8 priv_a_1[32] __initconst = { 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static const u8 priv_b_1[32] __initconst = { 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55, }; static const u8 pub_a_1[64] __initconst = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, }; static const u8 pub_b_1[64] __initconst = { 0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55, 0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47, 0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09, 0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e, 0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f, 0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47, 0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73, 0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c, }; static const u8 dhkey_1[32] __initconst = { 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86, 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99, 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec, }; static const u8 priv_a_2[32] __initconst = { 0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7, 0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4, 0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60, 0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06, }; static const u8 priv_b_2[32] __initconst = { 0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3, 0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e, 0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97, 0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52, }; static const u8 pub_a_2[64] __initconst = { 0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98, 0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5, 0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4, 0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c, 0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea, 0xd4, 
0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0, 0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd, 0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91, }; static const u8 pub_b_2[64] __initconst = { 0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf, 0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84, 0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d, 0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4, 0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8, 0x43, 0x4d, 0x6b, 0xca, 0x10, 0xb0, 0xc2, 0x01, 0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee, 0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02, }; static const u8 dhkey_2[32] __initconst = { 0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41, 0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f, 0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62, 0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab, }; static const u8 priv_a_3[32] __initconst = { 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static const u8 pub_a_3[64] __initconst = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, }; static const u8 dhkey_3[32] __initconst = { 0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda, 0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3, 0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae, 0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70, }; static int __init test_ecdh_sample(struct crypto_kpp *tfm, const u8 priv_a[32], const u8 priv_b[32], const u8 pub_a[64], const u8 pub_b[64], const u8 dhkey[32]) { u8 *tmp, *dhkey_a, *dhkey_b; int ret; tmp = kmalloc(64, GFP_KERNEL); if (!tmp) return -EINVAL; dhkey_a = &tmp[0]; dhkey_b = &tmp[32]; ret = set_ecdh_privkey(tfm, priv_a); if (ret) goto out; ret = compute_ecdh_secret(tfm, pub_b, dhkey_a); if (ret) goto out; if (memcmp(dhkey_a, dhkey, 32)) { ret = -EINVAL; goto out; } ret = set_ecdh_privkey(tfm, priv_b); if (ret) goto out; ret = compute_ecdh_secret(tfm, pub_a, dhkey_b); if (ret) goto out; if (memcmp(dhkey_b, dhkey, 32)) ret = -EINVAL; /* fall through*/ out: kfree(tmp); return ret; } static char test_ecdh_buffer[32]; static ssize_t test_ecdh_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer, strlen(test_ecdh_buffer)); } static const struct file_operations test_ecdh_fops = { .open = simple_open, .read = test_ecdh_read, .llseek = default_llseek, }; static int __init test_ecdh(void) { struct crypto_kpp *tfm; ktime_t calltime, delta, rettime; unsigned long long duration = 0; int err; calltime = ktime_get(); tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm)) { BT_ERR("Unable to create ECDH crypto context"); err = PTR_ERR(tfm); goto done; } err = test_ecdh_sample(tfm, priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1); if (err) { BT_ERR("ECDH sample 1 failed"); goto done; } err = test_ecdh_sample(tfm, priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2); if (err) { BT_ERR("ECDH sample 2 failed"); goto done; } err = test_ecdh_sample(tfm, priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3); if (err) { BT_ERR("ECDH sample 3 failed"); goto done; } crypto_free_kpp(tfm); rettime = ktime_get(); delta = 
ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; BT_INFO("ECDH test passed in %llu usecs", duration); done: if (!err) snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "PASS (%llu usecs)\n", duration); else snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n"); debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL, &test_ecdh_fops); return err; } #else static inline int test_ecdh(void) { return 0; } #endif static int __init run_selftest(void) { int err; BT_INFO("Starting self testing"); err = test_ecdh(); if (err) goto done; err = bt_selftest_smp(); done: BT_INFO("Finished self testing"); return err; } #if IS_MODULE(CONFIG_BT) /* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a * wrapper to allow running this at module init. * * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all. */ int __init bt_selftest(void) { return run_selftest(); } #else /* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is run * via late_initcall() as last item in the initialization sequence. * * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all. */ static int __init bt_selftest_init(void) { return run_selftest(); } late_initcall(bt_selftest_init); #endif
linux-master
net/bluetooth/selftest.c
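net/bluetooth/selftest.c above exposes the ECDH self-test result through a read-only debugfs file created with debugfs_create_file("selftest_ecdh", ...). A small reader is sketched below as an illustration only; it assumes debugfs is mounted at /sys/kernel/debug and that bt_debugfs is the usual "bluetooth" directory created by the core stack, and it is not part of the kernel tree.

/* Illustrative reader for the selftest_ecdh debugfs file; the path is an
 * assumption about where bt_debugfs lives on a typical system.
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/kernel/debug/bluetooth/selftest_ecdh", "r");

	if (!f) {
		/* File is absent when CONFIG_BT_SELFTEST_ECDH is disabled */
		perror("selftest_ecdh");
		return 1;
	}

	/* The file contains either "PASS (<usecs> usecs)\n" or "FAIL\n",
	 * filled in by test_ecdh() at initialization time.
	 */
	if (fgets(buf, sizeof(buf), f))
		printf("ECDH selftest: %s", buf);

	fclose(f);
	return 0;
}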
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan <[email protected]> Copyright (C) 2010 Google Inc. Copyright (C) 2011 ProFUSION Embedded Systems Copyright (c) 2012 Code Aurora Forum. All rights reserved. Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP core. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/crc16.h> #include <linux/filter.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "smp.h" #include "a2mp.h" #include "amp.h" #define LE_FLOWCTL_MAX_CREDITS 65535 bool disable_ertm; bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED); static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size); static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event); static void l2cap_retrans_timeout(struct work_struct *work); static void l2cap_monitor_timeout(struct work_struct *work); static void l2cap_ack_timeout(struct work_struct *work); static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) { if (link_type == LE_LINK) { if (bdaddr_type == ADDR_LE_DEV_PUBLIC) return BDADDR_LE_PUBLIC; else return BDADDR_LE_RANDOM; } return BDADDR_BREDR; } static inline u8 bdaddr_src_type(struct hci_conn *hcon) { return bdaddr_type(hcon->type, hcon->src_type); } static inline u8 bdaddr_dst_type(struct hci_conn *hcon) { return bdaddr_type(hcon->type, hcon->dst_type); } /* ---- L2CAP channels ---- */ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->dcid == cid) return c; } return NULL; } static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->scid == cid) return c; } return NULL; } /* Find channel with given SCID. * Returns a reference locked channel. 
*/ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_scid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ c = l2cap_chan_hold_unless_zero(c); if (c) l2cap_chan_lock(c); } mutex_unlock(&conn->chan_lock); return c; } /* Find channel with given DCID. * Returns a reference locked channel. */ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_dcid(conn, cid); if (c) { /* Only lock if chan reference is not 0 */ c = l2cap_chan_hold_unless_zero(c); if (c) l2cap_chan_lock(c); } mutex_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; list_for_each_entry(c, &conn->chan_l, list) { if (c->ident == ident) return c; } return NULL; } static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident) { struct l2cap_chan *c; mutex_lock(&conn->chan_lock); c = __l2cap_get_chan_by_ident(conn, ident); if (c) { /* Only lock if chan reference is not 0 */ c = l2cap_chan_hold_unless_zero(c); if (c) l2cap_chan_lock(c); } mutex_unlock(&conn->chan_lock); return c; } static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src, u8 src_type) { struct l2cap_chan *c; list_for_each_entry(c, &chan_list, global_l) { if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR) continue; if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR) continue; if (c->sport == psm && !bacmp(&c->src, src)) return c; } return NULL; } int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) { int err; write_lock(&chan_list_lock); if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) { err = -EADDRINUSE; goto done; } if (psm) { chan->psm = psm; chan->sport = psm; err = 0; } else { u16 p, start, end, incr; if (chan->src_type == BDADDR_BREDR) { start = L2CAP_PSM_DYN_START; end = L2CAP_PSM_AUTO_END; incr = 2; } else { start = L2CAP_PSM_LE_DYN_START; end = L2CAP_PSM_LE_DYN_END; incr = 1; } err = -EINVAL; for (p = start; p <= end; p += incr) if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src, chan->src_type)) { chan->psm = cpu_to_le16(p); chan->sport = cpu_to_le16(p); err = 0; break; } } done: write_unlock(&chan_list_lock); return err; } EXPORT_SYMBOL_GPL(l2cap_add_psm); int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) { write_lock(&chan_list_lock); /* Override the defaults (which are for conn-oriented) */ chan->omtu = L2CAP_DEFAULT_MTU; chan->chan_type = L2CAP_CHAN_FIXED; chan->scid = scid; write_unlock(&chan_list_lock); return 0; } static u16 l2cap_alloc_cid(struct l2cap_conn *conn) { u16 cid, dyn_end; if (conn->hcon->type == LE_LINK) dyn_end = L2CAP_CID_LE_DYN_END; else dyn_end = L2CAP_CID_DYN_END; for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) { if (!__l2cap_get_chan_by_scid(conn, cid)) return cid; } return 0; } static void l2cap_state_change(struct l2cap_chan *chan, int state) { BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), state_to_string(state)); chan->state = state; chan->ops->state_change(chan, state, 0); } static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, int state, int err) { chan->state = state; chan->ops->state_change(chan, chan->state, err); } static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) { chan->ops->state_change(chan, chan->state, err); } static void 
__set_retrans_timer(struct l2cap_chan *chan) { if (!delayed_work_pending(&chan->monitor_timer) && chan->retrans_timeout) { l2cap_set_timer(chan, &chan->retrans_timer, msecs_to_jiffies(chan->retrans_timeout)); } } static void __set_monitor_timer(struct l2cap_chan *chan) { __clear_retrans_timer(chan); if (chan->monitor_timeout) { l2cap_set_timer(chan, &chan->monitor_timer, msecs_to_jiffies(chan->monitor_timeout)); } } static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, u16 seq) { struct sk_buff *skb; skb_queue_walk(head, skb) { if (bt_cb(skb)->l2cap.txseq == seq) return skb; } return NULL; } /* ---- L2CAP sequence number lists ---- */ /* For ERTM, ordered lists of sequence numbers must be tracked for * SREJ requests that are received and for frames that are to be * retransmitted. These seq_list functions implement a singly-linked * list in an array, where membership in the list can also be checked * in constant time. Items can also be added to the tail of the list * and removed from the head in constant time, without further memory * allocs or frees. */ static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) { size_t alloc_size, i; /* Allocated size is a power of 2 to map sequence numbers * (which may be up to 14 bits) in to a smaller array that is * sized for the negotiated ERTM transmit windows. */ alloc_size = roundup_pow_of_two(size); seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL); if (!seq_list->list) return -ENOMEM; seq_list->mask = alloc_size - 1; seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; for (i = 0; i < alloc_size; i++) seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; return 0; } static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) { kfree(seq_list->list); } static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, u16 seq) { /* Constant-time check for list membership */ return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; } static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) { u16 seq = seq_list->head; u16 mask = seq_list->mask; seq_list->head = seq_list->list[seq & mask]; seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; } return seq; } static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) { u16 i; if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) return; for (i = 0; i <= seq_list->mask; i++) seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; seq_list->head = L2CAP_SEQ_LIST_CLEAR; seq_list->tail = L2CAP_SEQ_LIST_CLEAR; } static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) { u16 mask = seq_list->mask; /* All appends happen in constant time */ if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) return; if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) seq_list->head = seq; else seq_list->list[seq_list->tail & mask] = seq; seq_list->tail = seq; seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; } static void l2cap_chan_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, chan_timer.work); struct l2cap_conn *conn = chan->conn; int reason; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling * this work. No need to call l2cap_chan_hold(chan) here again. 
*/ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) reason = ECONNREFUSED; else if (chan->state == BT_CONNECT && chan->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; l2cap_chan_close(chan, reason); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) { struct l2cap_chan *chan; chan = kzalloc(sizeof(*chan), GFP_ATOMIC); if (!chan) return NULL; skb_queue_head_init(&chan->tx_q); skb_queue_head_init(&chan->srej_q); mutex_init(&chan->lock); /* Set default lock nesting level */ atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); write_lock(&chan_list_lock); list_add(&chan->global_l, &chan_list); write_unlock(&chan_list_lock); INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); chan->state = BT_OPEN; kref_init(&chan->kref); /* This flag is cleared in l2cap_chan_ready() */ set_bit(CONF_NOT_COMPLETE, &chan->conf_state); BT_DBG("chan %p", chan); return chan; } EXPORT_SYMBOL_GPL(l2cap_chan_create); static void l2cap_chan_destroy(struct kref *kref) { struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref); BT_DBG("chan %p", chan); write_lock(&chan_list_lock); list_del(&chan->global_l); write_unlock(&chan_list_lock); kfree(chan); } void l2cap_chan_hold(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); kref_get(&c->kref); } struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); if (!kref_get_unless_zero(&c->kref)) return NULL; return c; } void l2cap_chan_put(struct l2cap_chan *c) { BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); kref_put(&c->kref, l2cap_chan_destroy); } EXPORT_SYMBOL_GPL(l2cap_chan_put); void l2cap_chan_set_defaults(struct l2cap_chan *chan) { chan->fcs = L2CAP_FCS_CRC16; chan->max_tx = L2CAP_DEFAULT_MAX_TX; chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; chan->remote_max_tx = chan->max_tx; chan->remote_tx_win = chan->tx_win; chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; chan->sec_level = BT_SECURITY_LOW; chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; chan->conf_state = 0; set_bit(CONF_NOT_COMPLETE, &chan->conf_state); set_bit(FLAG_FORCE_ACTIVE, &chan->flags); } EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) { chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; chan->tx_credits = tx_credits; /* Derive MPS from connection MTU to stop HCI fragmentation */ chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); /* Give enough credits for a full packet */ chan->rx_credits = (chan->imtu / chan->mps) + 1; skb_queue_head_init(&chan->tx_q); } static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) { l2cap_le_flowctl_init(chan, tx_credits); /* L2CAP implementations shall support a minimum MPS of 64 octets */ if (chan->mps < L2CAP_ECRED_MIN_MPS) { chan->mps = L2CAP_ECRED_MIN_MPS; chan->rx_credits = (chan->imtu / chan->mps) + 1; } } void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, __le16_to_cpu(chan->psm), chan->dcid); 
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; chan->conn = conn; switch (chan->chan_type) { case L2CAP_CHAN_CONN_ORIENTED: /* Alloc CID for connection-oriented socket */ chan->scid = l2cap_alloc_cid(conn); if (conn->hcon->type == ACL_LINK) chan->omtu = L2CAP_DEFAULT_MTU; break; case L2CAP_CHAN_CONN_LESS: /* Connectionless socket */ chan->scid = L2CAP_CID_CONN_LESS; chan->dcid = L2CAP_CID_CONN_LESS; chan->omtu = L2CAP_DEFAULT_MTU; break; case L2CAP_CHAN_FIXED: /* Caller will set CID and CID specific MTU values */ break; default: /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; chan->dcid = L2CAP_CID_SIGNALING; chan->omtu = L2CAP_DEFAULT_MTU; } chan->local_id = L2CAP_BESTEFFORT_ID; chan->local_stype = L2CAP_SERV_BESTEFFORT; chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO; l2cap_chan_hold(chan); /* Only keep a reference for fixed channels if they requested it */ if (chan->chan_type != L2CAP_CHAN_FIXED || test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); list_add(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); mutex_unlock(&conn->chan_lock); } void l2cap_chan_del(struct l2cap_chan *chan, int err) { struct l2cap_conn *conn = chan->conn; __clear_chan_timer(chan); BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, state_to_string(chan->state)); chan->ops->teardown(chan, err); if (conn) { struct amp_mgr *mgr = conn->hcon->amp_mgr; /* Delete from channel list */ list_del(&chan->list); l2cap_chan_put(chan); chan->conn = NULL; /* Reference was only held for non-fixed channels or * fixed channels that explicitly requested it using the * FLAG_HOLD_HCI_CONN flag. 
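 *
 * Minimal sketch of the pairing (both paths test the same condition, so
 * every hci_conn_hold() taken in __l2cap_chan_add() above is matched by
 * exactly one hci_conn_drop() below):
 *
 *	if (chan->chan_type != L2CAP_CHAN_FIXED ||
 *	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
 *		hci_conn_hold(conn->hcon);	add path
 *	...
 *	if (chan->chan_type != L2CAP_CHAN_FIXED ||
 *	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
 *		hci_conn_drop(conn->hcon);	del path, the code just below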
*/ if (chan->chan_type != L2CAP_CHAN_FIXED || test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_drop(conn->hcon); if (mgr && mgr->bredr_chan == chan) mgr->bredr_chan = NULL; } if (chan->hs_hchan) { struct hci_chan *hs_hchan = chan->hs_hchan; BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan); amp_disconnect_logical_link(hs_hchan); } if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) return; switch (chan->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: skb_queue_purge(&chan->tx_q); break; case L2CAP_MODE_ERTM: __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); skb_queue_purge(&chan->srej_q); l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); fallthrough; case L2CAP_MODE_STREAMING: skb_queue_purge(&chan->tx_q); break; } } EXPORT_SYMBOL_GPL(l2cap_chan_del); static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, l2cap_chan_func_t func, void *data) { struct l2cap_chan *chan, *l; list_for_each_entry_safe(chan, l, &conn->chan_l, list) { if (chan->ident == id) func(chan, data); } } static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, void *data) { struct l2cap_chan *chan; list_for_each_entry(chan, &conn->chan_l, list) { func(chan, data); } } void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, void *data) { if (!conn) return; mutex_lock(&conn->chan_lock); __l2cap_chan_list(conn, func, data); mutex_unlock(&conn->chan_lock); } EXPORT_SYMBOL_GPL(l2cap_chan_list); static void l2cap_conn_update_id_addr(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, id_addr_timer.work); struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); bacpy(&chan->dst, &hcon->dst); chan->dst_type = bdaddr_dst_type(hcon); l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_conn_rsp rsp; u16 result; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) result = L2CAP_CR_LE_AUTHORIZATION; else result = L2CAP_CR_LE_BAD_PSM; l2cap_state_change(chan, BT_DISCONN); rsp.dcid = cpu_to_le16(chan->scid); rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); rsp.credits = cpu_to_le16(chan->rx_credits); rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); } static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) { l2cap_state_change(chan, BT_DISCONN); __l2cap_ecred_conn_rsp_defer(chan); } static void l2cap_chan_connect_reject(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_conn_rsp rsp; u16 result; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; l2cap_state_change(chan, BT_DISCONN); rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } void l2cap_chan_close(struct l2cap_chan *chan, int reason) { struct l2cap_conn *conn = chan->conn; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); switch (chan->state) { case BT_LISTEN: chan->ops->teardown(chan, 0); break; case BT_CONNECTED: case BT_CONFIG: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { 
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); l2cap_send_disconn_req(chan, reason); } else l2cap_chan_del(chan, reason); break; case BT_CONNECT2: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { if (conn->hcon->type == ACL_LINK) l2cap_chan_connect_reject(chan); else if (conn->hcon->type == LE_LINK) { switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: l2cap_chan_le_connect_reject(chan); break; case L2CAP_MODE_EXT_FLOWCTL: l2cap_chan_ecred_connect_reject(chan); return; } } } l2cap_chan_del(chan, reason); break; case BT_CONNECT: case BT_DISCONN: l2cap_chan_del(chan, reason); break; default: chan->ops->teardown(chan, 0); break; } } EXPORT_SYMBOL(l2cap_chan_close); static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) { switch (chan->chan_type) { case L2CAP_CHAN_RAW: switch (chan->sec_level) { case BT_SECURITY_HIGH: case BT_SECURITY_FIPS: return HCI_AT_DEDICATED_BONDING_MITM; case BT_SECURITY_MEDIUM: return HCI_AT_DEDICATED_BONDING; default: return HCI_AT_NO_BONDING; } break; case L2CAP_CHAN_CONN_LESS: if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; } if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) return HCI_AT_NO_BONDING_MITM; else return HCI_AT_NO_BONDING; break; case L2CAP_CHAN_CONN_ORIENTED: if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) return HCI_AT_NO_BONDING_MITM; else return HCI_AT_NO_BONDING; } fallthrough; default: switch (chan->sec_level) { case BT_SECURITY_HIGH: case BT_SECURITY_FIPS: return HCI_AT_GENERAL_BONDING_MITM; case BT_SECURITY_MEDIUM: return HCI_AT_GENERAL_BONDING; default: return HCI_AT_NO_BONDING; } break; } } /* Service level security */ int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator) { struct l2cap_conn *conn = chan->conn; __u8 auth_type; if (conn->hcon->type == LE_LINK) return smp_conn_security(conn->hcon, chan->sec_level); auth_type = l2cap_get_auth_type(chan); return hci_conn_security(conn->hcon, chan->sec_level, auth_type, initiator); } static u8 l2cap_get_ident(struct l2cap_conn *conn) { u8 id; /* Get next available identificator. * 1 - 128 are used by kernel. * 129 - 199 are reserved. * 200 - 254 are used by utilities like l2ping, etc. 
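 *
 * Illustrative usage, mirroring l2cap_send_conn_req() further down rather
 * than introducing a new API: the returned identifier wraps within the
 * kernel range (..., 127, 128, 1, 2, ...) and is stored so the eventual
 * response can be matched by ident:
 *
 *	chan->ident = l2cap_get_ident(conn);
 *	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 *		       sizeof(req), &req);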
*/ mutex_lock(&conn->ident_lock); if (++conn->tx_ident > 128) conn->tx_ident = 1; id = conn->tx_ident; mutex_unlock(&conn->ident_lock); return id; } static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); u8 flags; BT_DBG("code 0x%2.2x", code); if (!skb) return; /* Use NO_FLUSH if supported or we have an LE link (which does * not support auto-flushing packets) */ if (lmp_no_flush_capable(conn->hcon->hdev) || conn->hcon->type == LE_LINK) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; skb->priority = HCI_PRIO_MAX; hci_send_acl(conn->hchan, skb, flags); } static bool __chan_is_moving(struct l2cap_chan *chan) { return chan->move_state != L2CAP_MOVE_STABLE && chan->move_state != L2CAP_MOVE_WAIT_PREPARE; } static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) { struct hci_conn *hcon = chan->conn->hcon; u16 flags; BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, skb->priority); if (chan->hs_hcon && !__chan_is_moving(chan)) { if (chan->hs_hchan) hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE); else kfree_skb(skb); return; } /* Use NO_FLUSH for LE links (where this is the only option) or * if the BR/EDR link supports it and flushing has not been * explicitly requested (through FLAG_FLUSHABLE). */ if (hcon->type == LE_LINK || (!test_bit(FLAG_FLUSHABLE, &chan->flags) && lmp_no_flush_capable(hcon->hdev))) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); hci_send_acl(chan->conn->hchan, skb, flags); } static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) { control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; if (enh & L2CAP_CTRL_FRAME_TYPE) { /* S-Frame */ control->sframe = 1; control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; control->sar = 0; control->txseq = 0; } else { /* I-Frame */ control->sframe = 0; control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; control->poll = 0; control->super = 0; } } static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) { control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { /* S-Frame */ control->sframe = 1; control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; control->sar = 0; control->txseq = 0; } else { /* I-Frame */ control->sframe = 0; control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; control->poll = 0; control->super = 0; } } static inline void __unpack_control(struct l2cap_chan *chan, struct sk_buff *skb) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { __unpack_extended_control(get_unaligned_le32(skb->data), &bt_cb(skb)->l2cap); skb_pull(skb, L2CAP_EXT_CTRL_SIZE); } else { __unpack_enhanced_control(get_unaligned_le16(skb->data), &bt_cb(skb)->l2cap); skb_pull(skb, L2CAP_ENH_CTRL_SIZE); } } static u32 __pack_extended_control(struct l2cap_ctrl *control) { u32 packed; 
packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; if (control->sframe) { packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; packed |= L2CAP_EXT_CTRL_FRAME_TYPE; } else { packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; } return packed; } static u16 __pack_enhanced_control(struct l2cap_ctrl *control) { u16 packed; packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; if (control->sframe) { packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; packed |= L2CAP_CTRL_FRAME_TYPE; } else { packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; } return packed; } static inline void __pack_control(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { put_unaligned_le32(__pack_extended_control(control), skb->data + L2CAP_HDR_SIZE); } else { put_unaligned_le16(__pack_enhanced_control(control), skb->data + L2CAP_HDR_SIZE); } } static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) { if (test_bit(FLAG_EXT_CTRL, &chan->flags)) return L2CAP_EXT_HDR_SIZE; else return L2CAP_ENH_HDR_SIZE; } static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; struct l2cap_hdr *lh; int hlen = __ertm_hdr_size(chan); if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; skb = bt_skb_alloc(hlen, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); lh = skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); else put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *)skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } skb->priority = HCI_PRIO_MAX; return skb; } static void l2cap_send_sframe(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; u32 control_field; BT_DBG("chan %p, control %p", chan, control); if (!control->sframe) return; if (__chan_is_moving(chan)) return; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && !control->poll) control->final = 1; if (control->super == L2CAP_SUPER_RR) clear_bit(CONN_RNR_SENT, &chan->conn_state); else if (control->super == L2CAP_SUPER_RNR) set_bit(CONN_RNR_SENT, &chan->conn_state); if (control->super != L2CAP_SUPER_SREJ) { chan->last_acked_seq = control->reqseq; __clear_ack_timer(chan); } BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, control->final, control->poll, control->super); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) control_field = __pack_extended_control(control); else control_field = __pack_enhanced_control(control); skb = l2cap_create_sframe_pdu(chan, control_field); if (!IS_ERR(skb)) l2cap_do_send(chan, skb); } static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) { struct l2cap_ctrl control; BT_DBG("chan %p, poll %d", chan, poll); memset(&control, 0, sizeof(control)); control.sframe = 1; control.poll = poll; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) control.super = L2CAP_SUPER_RNR; else control.super = L2CAP_SUPER_RR; control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &control); } static 
inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) return true; return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } static bool __amp_capable(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct hci_dev *hdev; bool amp_available = false; if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return false; if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP)) return false; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { if (hdev->amp_type != AMP_TYPE_BREDR && test_bit(HCI_UP, &hdev->flags)) { amp_available = true; break; } } read_unlock(&hci_dev_list_lock); if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED) return amp_available; return false; } static bool l2cap_check_efs(struct l2cap_chan *chan) { /* Check EFS parameters */ return true; } void l2cap_send_conn_req(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_conn_req req; req.scid = cpu_to_le16(chan->scid); req.psm = chan->psm; chan->ident = l2cap_get_ident(conn); set_bit(CONF_CONNECT_PEND, &chan->conf_state); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); } static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id) { struct l2cap_create_chan_req req; req.scid = cpu_to_le16(chan->scid); req.psm = chan->psm; req.amp_id = amp_id; chan->ident = l2cap_get_ident(chan->conn); l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ, sizeof(req), &req); } static void l2cap_move_setup(struct l2cap_chan *chan) { struct sk_buff *skb; BT_DBG("chan %p", chan); if (chan->mode != L2CAP_MODE_ERTM) return; __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); chan->retry_count = 0; skb_queue_walk(&chan->tx_q, skb) { if (bt_cb(skb)->l2cap.retries) bt_cb(skb)->l2cap.retries = 1; else break; } chan->expected_tx_seq = chan->buffer_seq; clear_bit(CONN_REJ_ACT, &chan->conn_state); clear_bit(CONN_SREJ_ACT, &chan->conn_state); l2cap_seq_list_clear(&chan->retrans_list); l2cap_seq_list_clear(&chan->srej_list); skb_queue_purge(&chan->srej_q); chan->tx_state = L2CAP_TX_STATE_XMIT; chan->rx_state = L2CAP_RX_STATE_MOVE; set_bit(CONN_REMOTE_BUSY, &chan->conn_state); } static void l2cap_move_done(struct l2cap_chan *chan) { u8 move_role = chan->move_role; BT_DBG("chan %p", chan); chan->move_state = L2CAP_MOVE_STABLE; chan->move_role = L2CAP_MOVE_ROLE_NONE; if (chan->mode != L2CAP_MODE_ERTM) return; switch (move_role) { case L2CAP_MOVE_ROLE_INITIATOR: l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL); chan->rx_state = L2CAP_RX_STATE_WAIT_F; break; case L2CAP_MOVE_ROLE_RESPONDER: chan->rx_state = L2CAP_RX_STATE_WAIT_P; break; } } static void l2cap_chan_ready(struct l2cap_chan *chan) { /* The channel may have already been flagged as connected in * case of receiving data before the L2CAP info req/rsp * procedure is complete. 
*/ if (chan->state == BT_CONNECTED) return; /* This clears all conf flags, including CONF_NOT_COMPLETE */ chan->conf_state = 0; __clear_chan_timer(chan); switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: if (!chan->tx_credits) chan->ops->suspend(chan); break; } chan->state = BT_CONNECTED; chan->ops->ready(chan); } static void l2cap_le_connect(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_conn_req req; if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) return; if (!chan->imtu) chan->imtu = chan->conn->mtu; l2cap_le_flowctl_init(chan, 0); memset(&req, 0, sizeof(req)); req.psm = chan->psm; req.scid = cpu_to_le16(chan->scid); req.mtu = cpu_to_le16(chan->imtu); req.mps = cpu_to_le16(chan->mps); req.credits = cpu_to_le16(chan->rx_credits); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, sizeof(req), &req); } struct l2cap_ecred_conn_data { struct { struct l2cap_ecred_conn_req req; __le16 scid[5]; } __packed pdu; struct l2cap_chan *chan; struct pid *pid; int count; }; static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) { struct l2cap_ecred_conn_data *conn = data; struct pid *pid; if (chan == conn->chan) return; if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; pid = chan->ops->get_peer_pid(chan); /* Only add deferred channels with the same PID/PSM */ if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) return; if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; l2cap_ecred_init(chan, 0); /* Set the same ident so we can match on the rsp */ chan->ident = conn->chan->ident; /* Include all channels deferred */ conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); conn->count++; } static void l2cap_ecred_connect(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_ecred_conn_data data; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) return; if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; l2cap_ecred_init(chan, 0); memset(&data, 0, sizeof(data)); data.pdu.req.psm = chan->psm; data.pdu.req.mtu = cpu_to_le16(chan->imtu); data.pdu.req.mps = cpu_to_le16(chan->mps); data.pdu.req.credits = cpu_to_le16(chan->rx_credits); data.pdu.scid[0] = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); data.count = 1; data.chan = chan; data.pid = chan->ops->get_peer_pid(chan); __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, sizeof(data.pdu.req) + data.count * sizeof(__le16), &data.pdu); } static void l2cap_le_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; if (!smp_conn_security(conn->hcon, chan->sec_level)) return; if (!chan->psm) { l2cap_chan_ready(chan); return; } if (chan->state == BT_CONNECT) { if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) l2cap_ecred_connect(chan); else l2cap_le_connect(chan); } } static void l2cap_start_connection(struct l2cap_chan *chan) { if (__amp_capable(chan)) { BT_DBG("chan %p AMP capable: discover AMPs", chan); a2mp_discover_amp(chan); } else if (chan->conn->hcon->type == LE_LINK) { l2cap_le_start(chan); } else { l2cap_send_conn_req(chan); } } static void l2cap_request_info(struct l2cap_conn *conn) { struct l2cap_info_req req; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) return; req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = 
l2cap_get_ident(conn); schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } static bool l2cap_check_enc_key_size(struct hci_conn *hcon) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. The * specification in theory allows a minimum of 1, but to align * BR/EDR and LE transports, a minimum of 7 is chosen. * * This check might also be called for unencrypted connections * that have no key size requirements. Ensure that the link is * actually encrypted before enforcing a key size. */ int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ if (hcon->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || hcon->enc_key_size >= min_key_size); } static void l2cap_do_start(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; if (conn->hcon->type == LE_LINK) { l2cap_le_start(chan); return; } if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { l2cap_request_info(conn); return; } if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; if (!l2cap_chan_check_security(chan, true) || !__l2cap_no_conn_pending(chan)) return; if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) { u32 local_feat_mask = l2cap_feat_mask; if (!disable_ertm) local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; switch (mode) { case L2CAP_MODE_ERTM: return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; case L2CAP_MODE_STREAMING: return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; default: return 0x00; } } static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) { struct l2cap_conn *conn = chan->conn; struct l2cap_disconn_req req; if (!conn) return; if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); } if (chan->scid == L2CAP_CID_A2MP) { l2cap_state_change(chan, BT_DISCONN); return; } req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, sizeof(req), &req); l2cap_state_change_and_error(chan, BT_DISCONN, err); } /* ---- L2CAP connections ---- */ static void l2cap_conn_start(struct l2cap_conn *conn) { struct l2cap_chan *chan, *tmp; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { l2cap_chan_lock(chan); if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { l2cap_chan_ready(chan); l2cap_chan_unlock(chan); continue; } if (chan->state == BT_CONNECT) { if (!l2cap_chan_check_security(chan, true) || !__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); continue; } if (!l2cap_mode_supported(chan->mode, conn->feat_mask) && test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { l2cap_chan_close(chan, ECONNRESET); l2cap_chan_unlock(chan); continue; } if (l2cap_check_enc_key_size(conn->hcon)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); } else if (chan->state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; char buf[128]; rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); if (l2cap_chan_check_security(chan, false)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = 
cpu_to_le16(L2CAP_CS_AUTHOR_PEND); chan->ops->defer(chan); } else { l2cap_state_change(chan, BT_CONFIG); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } } else { rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); } l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (test_bit(CONF_REQ_SENT, &chan->conf_state) || rsp.result != L2CAP_CR_SUCCESS) { l2cap_chan_unlock(chan); continue; } set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } static void l2cap_le_conn_ready(struct l2cap_conn *conn) { struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; BT_DBG("%s conn %p", hdev->name, conn); /* For outgoing pairing which doesn't necessarily have an * associated socket (e.g. mgmt_pair_device). */ if (hcon->out) smp_conn_security(hcon, hcon->pending_sec_level); /* For LE peripheral connections, make sure the connection interval * is in the range of the minimum and maximum interval that has * been configured for this connection. If not, then trigger * the connection update procedure. */ if (hcon->role == HCI_ROLE_SLAVE && (hcon->le_conn_interval < hcon->le_conn_min_interval || hcon->le_conn_interval > hcon->le_conn_max_interval)) { struct l2cap_conn_param_update_req req; req.min = cpu_to_le16(hcon->le_conn_min_interval); req.max = cpu_to_le16(hcon->le_conn_max_interval); req.latency = cpu_to_le16(hcon->le_conn_latency); req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req); } } static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan *chan; struct hci_conn *hcon = conn->hcon; BT_DBG("conn %p", conn); if (hcon->type == ACL_LINK) l2cap_request_info(conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); if (chan->scid == L2CAP_CID_A2MP) { l2cap_chan_unlock(chan); continue; } if (hcon->type == LE_LINK) { l2cap_le_start(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) l2cap_chan_ready(chan); } else if (chan->state == BT_CONNECT) { l2cap_do_start(chan); } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); if (hcon->type == LE_LINK) l2cap_le_conn_ready(conn); queue_work(hcon->hdev->workqueue, &conn->pending_rx_work); } /* Notify sockets that we cannot guaranty reliability anymore */ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) { struct l2cap_chan *chan; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) l2cap_chan_set_err(chan, err); } mutex_unlock(&conn->chan_lock); } static void l2cap_info_timeout(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, info_timer.work); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } /* * l2cap_user * External modules can register l2cap_user objects on l2cap_conn. The ->probe * callback is called during registration. The ->remove callback is called * during unregistration. * An l2cap_user object can either be explicitly unregistered or when the * underlying l2cap_conn object is deleted. 
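 *
 * A minimal registration sketch (hypothetical external-module code;
 * my_user, my_probe and my_remove are illustrative names only, and the
 * list head must start out empty or l2cap_register_user() returns
 * -EINVAL):
 *
 *	static struct l2cap_user my_user = {
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *
 * my_remove() is also invoked automatically if the underlying connection
 * goes away before the module gets to unregister.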
This guarantees that l2cap->hcon, * l2cap->hchan, .. are valid as long as the remove callback hasn't been called. * External modules must own a reference to the l2cap_conn object if they intend * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at * any time if they don't. */ int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user) { struct hci_dev *hdev = conn->hcon->hdev; int ret; /* We need to check whether l2cap_conn is registered. If it is not, we * must not register the l2cap_user. l2cap_conn_del() unregisters * l2cap_conn objects, but doesn't provide its own locking. Instead, it * relies on the parent hci_conn object to be locked. This itself relies * on the hci_dev object to be locked. So we must lock the hci device * here, too. */ hci_dev_lock(hdev); if (!list_empty(&user->list)) { ret = -EINVAL; goto out_unlock; } /* conn->hchan is NULL after l2cap_conn_del() was called */ if (!conn->hchan) { ret = -ENODEV; goto out_unlock; } ret = user->probe(conn, user); if (ret) goto out_unlock; list_add(&user->list, &conn->users); ret = 0; out_unlock: hci_dev_unlock(hdev); return ret; } EXPORT_SYMBOL(l2cap_register_user); void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) { struct hci_dev *hdev = conn->hcon->hdev; hci_dev_lock(hdev); if (list_empty(&user->list)) goto out_unlock; list_del_init(&user->list); user->remove(conn, user); out_unlock: hci_dev_unlock(hdev); } EXPORT_SYMBOL(l2cap_unregister_user); static void l2cap_unregister_all_users(struct l2cap_conn *conn) { struct l2cap_user *user; while (!list_empty(&conn->users)) { user = list_first_entry(&conn->users, struct l2cap_user, list); list_del_init(&user->list); user->remove(conn, user); } } static void l2cap_conn_del(struct hci_conn *hcon, int err) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan, *l; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); kfree_skb(conn->rx_skb); skb_queue_purge(&conn->pending_rx); /* We cannot call flush_work(&conn->pending_rx_work) here since we * might block if we are running on a worker from the same workqueue * pending_rx_work is waiting on. */ if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); cancel_delayed_work_sync(&conn->id_addr_timer); l2cap_unregister_all_users(conn); /* Force the connection to be immediately dropped */ hcon->disc_timeout = 0; mutex_lock(&conn->chan_lock); /* Kill channels */ list_for_each_entry_safe(chan, l, &conn->chan_l, list) { l2cap_chan_hold(chan); l2cap_chan_lock(chan); l2cap_chan_del(chan, err); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } mutex_unlock(&conn->chan_lock); hci_chan_del(conn->hchan); if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); hcon->l2cap_data = NULL; conn->hchan = NULL; l2cap_conn_put(conn); } static void l2cap_conn_free(struct kref *ref) { struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); hci_conn_put(conn->hcon); kfree(conn); } struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) { kref_get(&conn->ref); return conn; } EXPORT_SYMBOL(l2cap_conn_get); void l2cap_conn_put(struct l2cap_conn *conn) { kref_put(&conn->ref, l2cap_conn_free); } EXPORT_SYMBOL(l2cap_conn_put); /* ---- Socket interface ---- */ /* Find socket with psm and source / destination bdaddr. * Returns closest match. 
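 *
 * Worked example (hypothetical addresses): a channel whose src and dst
 * both equal the addresses being looked up is returned immediately, while
 * a channel bound to BDADDR_ANY on either side only qualifies as the
 * closest match and is returned when no exact match exists.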
*/ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src, bdaddr_t *dst, u8 link_type) { struct l2cap_chan *c, *tmp, *c1 = NULL; read_lock(&chan_list_lock); list_for_each_entry_safe(c, tmp, &chan_list, global_l) { if (state && c->state != state) continue; if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) continue; if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) continue; if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { int src_match, dst_match; int src_any, dst_any; /* Exact match. */ src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { if (!l2cap_chan_hold_unless_zero(c)) continue; read_unlock(&chan_list_lock); return c; } /* Closest match */ src_any = !bacmp(&c->src, BDADDR_ANY); dst_any = !bacmp(&c->dst, BDADDR_ANY); if ((src_match && dst_any) || (src_any && dst_match) || (src_any && dst_any)) c1 = c; } } if (c1) c1 = l2cap_chan_hold_unless_zero(c1); read_unlock(&chan_list_lock); return c1; } static void l2cap_monitor_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, monitor_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); if (!chan->conn) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_retrans_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, retrans_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); if (!chan->conn) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_streaming_send(struct l2cap_chan *chan, struct sk_buff_head *skbs) { struct sk_buff *skb; struct l2cap_ctrl *control; BT_DBG("chan %p, skbs %p", chan, skbs); if (__chan_is_moving(chan)) return; skb_queue_splice_tail_init(skbs, &chan->tx_q); while (!skb_queue_empty(&chan->tx_q)) { skb = skb_dequeue(&chan->tx_q); bt_cb(skb)->l2cap.retries = 1; control = &bt_cb(skb)->l2cap; control->reqseq = 0; control->txseq = chan->next_tx_seq; __pack_control(chan, control, skb); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } l2cap_do_send(chan, skb); BT_DBG("Sent txseq %u", control->txseq); chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); chan->frames_sent++; } } static int l2cap_ertm_send(struct l2cap_chan *chan) { struct sk_buff *skb, *tx_skb; struct l2cap_ctrl *control; int sent = 0; BT_DBG("chan %p", chan); if (chan->state != BT_CONNECTED) return -ENOTCONN; if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return 0; if (__chan_is_moving(chan)) return 0; while (chan->tx_send_head && chan->unacked_frames < chan->remote_tx_win && chan->tx_state == L2CAP_TX_STATE_XMIT) { skb = chan->tx_send_head; bt_cb(skb)->l2cap.retries = 1; control = &bt_cb(skb)->l2cap; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) control->final = 1; control->reqseq = chan->buffer_seq; chan->last_acked_seq = chan->buffer_seq; control->txseq = chan->next_tx_seq; __pack_control(chan, control, skb); if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) skb->data, skb->len); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } /* Clone after data has been modified. Data is assumed to be read-only (for locking purposes) on cloned sk_buffs. 
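 *
 * Sketch of the rule this implies for the retransmit path (this is what
 * l2cap_ertm_resend() below does): once an skb has been cloned its data
 * must be treated as read-only, so a private writable copy is needed
 * before the control field can be rewritten in place:
 *
 *	if (skb_cloned(skb))
 *		tx_skb = skb_copy(skb, GFP_KERNEL);
 *	else
 *		tx_skb = skb_clone(skb, GFP_KERNEL);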
*/ tx_skb = skb_clone(skb, GFP_KERNEL); if (!tx_skb) break; __set_retrans_timer(chan); chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); chan->unacked_frames++; chan->frames_sent++; sent++; if (skb_queue_is_last(&chan->tx_q, skb)) chan->tx_send_head = NULL; else chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); l2cap_do_send(chan, tx_skb); BT_DBG("Sent txseq %u", control->txseq); } BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent, chan->unacked_frames, skb_queue_len(&chan->tx_q)); return sent; } static void l2cap_ertm_resend(struct l2cap_chan *chan) { struct l2cap_ctrl control; struct sk_buff *skb; struct sk_buff *tx_skb; u16 seq; BT_DBG("chan %p", chan); if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return; if (__chan_is_moving(chan)) return; while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { seq = l2cap_seq_list_pop(&chan->retrans_list); skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); if (!skb) { BT_DBG("Error: Can't retransmit seq %d, frame missing", seq); continue; } bt_cb(skb)->l2cap.retries++; control = bt_cb(skb)->l2cap; if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries > chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); l2cap_seq_list_clear(&chan->retrans_list); break; } control.reqseq = chan->buffer_seq; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) control.final = 1; else control.final = 0; if (skb_cloned(skb)) { /* Cloned sk_buffs are read-only, so we need a * writeable copy */ tx_skb = skb_copy(skb, GFP_KERNEL); } else { tx_skb = skb_clone(skb, GFP_KERNEL); } if (!tx_skb) { l2cap_seq_list_clear(&chan->retrans_list); break; } /* Update skb contents */ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { put_unaligned_le32(__pack_extended_control(&control), tx_skb->data + L2CAP_HDR_SIZE); } else { put_unaligned_le16(__pack_enhanced_control(&control), tx_skb->data + L2CAP_HDR_SIZE); } /* Update FCS */ if (chan->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len - L2CAP_FCS_SIZE); put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); BT_DBG("Resent txseq %d", control.txseq); chan->last_acked_seq = chan->buffer_seq; } } static void l2cap_retransmit(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_seq_list_append(&chan->retrans_list, control->reqseq); l2cap_ertm_resend(chan); } static void l2cap_retransmit_all(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->poll) set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_seq_list_clear(&chan->retrans_list); if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) return; if (chan->unacked_frames) { skb_queue_walk(&chan->tx_q, skb) { if (bt_cb(skb)->l2cap.txseq == control->reqseq || skb == chan->tx_send_head) break; } skb_queue_walk_from(&chan->tx_q, skb) { if (skb == chan->tx_send_head) break; l2cap_seq_list_append(&chan->retrans_list, bt_cb(skb)->l2cap.txseq); } l2cap_ertm_resend(chan); } } static void l2cap_send_ack(struct l2cap_chan *chan) { struct l2cap_ctrl control; u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, chan->last_acked_seq); int threshold; BT_DBG("chan %p last_acked_seq %d buffer_seq %d", chan, chan->last_acked_seq, chan->buffer_seq); memset(&control, 0, sizeof(control)); control.sframe = 1; if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && chan->rx_state == L2CAP_RX_STATE_RECV) { __clear_ack_timer(chan); 
control.super = L2CAP_SUPER_RNR; control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &control); } else { if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { l2cap_ertm_send(chan); /* If any i-frames were sent, they included an ack */ if (chan->buffer_seq == chan->last_acked_seq) frames_to_ack = 0; } /* Ack now if the window is 3/4ths full. * Calculate without mul or div */ threshold = chan->ack_win; threshold += threshold << 1; threshold >>= 2; BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack, threshold); if (frames_to_ack >= threshold) { __clear_ack_timer(chan); control.super = L2CAP_SUPER_RR; control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &control); frames_to_ack = 0; } if (frames_to_ack) __set_ack_timer(chan); } } static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb) { struct l2cap_conn *conn = chan->conn; struct sk_buff **frag; int sent = 0; if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter)) return -EFAULT; sent += count; len -= count; /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { struct sk_buff *tmp; count = min_t(unsigned int, conn->mtu, len); tmp = chan->ops->alloc_skb(chan, 0, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(tmp)) return PTR_ERR(tmp); *frag = tmp; if (!copy_from_iter_full(skb_put(*frag, count), count, &msg->msg_iter)) return -EFAULT; sent += count; len -= count; skb->len += (*frag)->len; skb->data_len += (*frag)->len; frag = &(*frag)->next; } return sent; } static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; struct l2cap_hdr *lh; BT_DBG("chan %p psm 0x%2.2x len %zu", chan, __le16_to_cpu(chan->psm), len); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 sdulen) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); if (!conn) return ERR_PTR(-ENOTCONN); hlen = __ertm_hdr_size(chan); if (sdulen) hlen += L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; count = min_t(unsigned int, (conn->mtu 
- hlen), len); skb = chan->ops->alloc_skb(chan, hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); /* Control header is populated later */ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); else put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } bt_cb(skb)->l2cap.fcs = chan->fcs; bt_cb(skb)->l2cap.retries = 0; return skb; } static int l2cap_segment_sdu(struct l2cap_chan *chan, struct sk_buff_head *seg_queue, struct msghdr *msg, size_t len) { struct sk_buff *skb; u16 sdu_len; size_t pdu_len; u8 sar; BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); /* It is critical that ERTM PDUs fit in a single HCI fragment, * so fragmented skbs are not used. The HCI layer's handling * of fragmented skbs is not compatible with ERTM's queueing. */ /* PDU size is derived from the HCI MTU */ pdu_len = chan->conn->mtu; /* Constrain PDU size for BR/EDR connections */ if (!chan->hs_hcon) pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); /* Adjust for largest possible L2CAP overhead. */ if (chan->fcs) pdu_len -= L2CAP_FCS_SIZE; pdu_len -= __ertm_hdr_size(chan); /* Remote device may have requested smaller PDUs */ pdu_len = min_t(size_t, pdu_len, chan->remote_mps); if (len <= pdu_len) { sar = L2CAP_SAR_UNSEGMENTED; sdu_len = 0; pdu_len = len; } else { sar = L2CAP_SAR_START; sdu_len = len; } while (len > 0) { skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len); if (IS_ERR(skb)) { __skb_queue_purge(seg_queue); return PTR_ERR(skb); } bt_cb(skb)->l2cap.sar = sar; __skb_queue_tail(seg_queue, skb); len -= pdu_len; if (sdu_len) sdu_len = 0; if (len <= pdu_len) { sar = L2CAP_SAR_END; pdu_len = len; } else { sar = L2CAP_SAR_CONTINUE; } } return 0; } static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 sdulen) { struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; int err, count, hlen; struct l2cap_hdr *lh; BT_DBG("chan %p len %zu", chan, len); if (!conn) return ERR_PTR(-ENOTCONN); hlen = L2CAP_HDR_SIZE; if (sdulen) hlen += L2CAP_SDULEN_SIZE; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = chan->ops->alloc_skb(chan, hlen, count, msg->msg_flags & MSG_DONTWAIT); if (IS_ERR(skb)) return skb; /* Create L2CAP header */ lh = skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static int l2cap_segment_le_sdu(struct l2cap_chan *chan, struct sk_buff_head *seg_queue, struct msghdr *msg, size_t len) { struct sk_buff *skb; size_t pdu_len; u16 sdu_len; BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); sdu_len = len; pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; while (len > 0) { if (len <= pdu_len) pdu_len = len; skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); if (IS_ERR(skb)) { __skb_queue_purge(seg_queue); return PTR_ERR(skb); } __skb_queue_tail(seg_queue, skb); len -= pdu_len; if (sdu_len) { sdu_len = 0; pdu_len += L2CAP_SDULEN_SIZE; 
} } return 0; } static void l2cap_le_flowctl_send(struct l2cap_chan *chan) { int sent = 0; BT_DBG("chan %p", chan); while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); chan->tx_credits--; sent++; } BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, skb_queue_len(&chan->tx_q)); } int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) { struct sk_buff *skb; int err; struct sk_buff_head seg_queue; if (!chan->conn) return -ENOTCONN; /* Connectionless channel */ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { skb = l2cap_create_connless_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); l2cap_do_send(chan, skb); return len; } switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: /* Check outgoing MTU */ if (len > chan->omtu) return -EMSGSIZE; __skb_queue_head_init(&seg_queue); err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); if (chan->state != BT_CONNECTED) { __skb_queue_purge(&seg_queue); err = -ENOTCONN; } if (err) return err; skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); l2cap_le_flowctl_send(chan); if (!chan->tx_credits) chan->ops->suspend(chan); err = len; break; case L2CAP_MODE_BASIC: /* Check outgoing MTU */ if (len > chan->omtu) return -EMSGSIZE; /* Create a basic PDU */ skb = l2cap_create_basic_pdu(chan, msg, len); if (IS_ERR(skb)) return PTR_ERR(skb); l2cap_do_send(chan, skb); err = len; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: /* Check outgoing MTU */ if (len > chan->omtu) { err = -EMSGSIZE; break; } __skb_queue_head_init(&seg_queue); /* Do segmentation before calling in to the state machine, * since it's possible to block while waiting for memory * allocation. */ err = l2cap_segment_sdu(chan, &seg_queue, msg, len); if (err) break; if (chan->mode == L2CAP_MODE_ERTM) l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); else l2cap_streaming_send(chan, &seg_queue); err = len; /* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. */ __skb_queue_purge(&seg_queue); break; default: BT_DBG("bad state %1.1x", chan->mode); err = -EBADFD; } return err; } EXPORT_SYMBOL_GPL(l2cap_chan_send); static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) { struct l2cap_ctrl control; u16 seq; BT_DBG("chan %p, txseq %u", chan, txseq); memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; for (seq = chan->expected_tx_seq; seq != txseq; seq = __next_seq(chan, seq)) { if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { control.reqseq = seq; l2cap_send_sframe(chan, &control); l2cap_seq_list_append(&chan->srej_list, seq); } } chan->expected_tx_seq = __next_seq(chan, txseq); } static void l2cap_send_srej_tail(struct l2cap_chan *chan) { struct l2cap_ctrl control; BT_DBG("chan %p", chan); if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) return; memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; control.reqseq = chan->srej_list.tail; l2cap_send_sframe(chan, &control); } static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) { struct l2cap_ctrl control; u16 initial_head; u16 seq; BT_DBG("chan %p, txseq %u", chan, txseq); memset(&control, 0, sizeof(control)); control.sframe = 1; control.super = L2CAP_SUPER_SREJ; /* Capture initial list head to allow only one pass through the list. 
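 *
 * Worked example (hypothetical sequence numbers): with the srej_list
 * holding 3, 5, 7 and txseq == 5, the loop pops 3, resends an SREJ for it
 * and re-appends it to the tail, then stops as soon as it pops 5, leaving
 * 7 and the re-appended 3 queued.  Comparing against the captured initial
 * head is what keeps re-appended entries from being visited again within
 * the same call.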
*/ initial_head = chan->srej_list.head; do { seq = l2cap_seq_list_pop(&chan->srej_list); if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) break; control.reqseq = seq; l2cap_send_sframe(chan, &control); l2cap_seq_list_append(&chan->srej_list, seq); } while (chan->srej_list.head != initial_head); } static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) { struct sk_buff *acked_skb; u16 ackseq; BT_DBG("chan %p, reqseq %u", chan, reqseq); if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) return; BT_DBG("expected_ack_seq %u, unacked_frames %u", chan->expected_ack_seq, chan->unacked_frames); for (ackseq = chan->expected_ack_seq; ackseq != reqseq; ackseq = __next_seq(chan, ackseq)) { acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); if (acked_skb) { skb_unlink(acked_skb, &chan->tx_q); kfree_skb(acked_skb); chan->unacked_frames--; } } chan->expected_ack_seq = reqseq; if (chan->unacked_frames == 0) __clear_retrans_timer(chan); BT_DBG("unacked_frames %u", chan->unacked_frames); } static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) { BT_DBG("chan %p", chan); chan->expected_tx_seq = chan->buffer_seq; l2cap_seq_list_clear(&chan->srej_list); skb_queue_purge(&chan->srej_q); chan->rx_state = L2CAP_RX_STATE_RECV; } static void l2cap_tx_state_xmit(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, event); switch (event) { case L2CAP_EV_DATA_REQUEST: if (chan->tx_send_head == NULL) chan->tx_send_head = skb_peek(skbs); skb_queue_splice_tail_init(skbs, &chan->tx_q); l2cap_ertm_send(chan); break; case L2CAP_EV_LOCAL_BUSY_DETECTED: BT_DBG("Enter LOCAL_BUSY"); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { /* The SREJ_SENT state must be aborted if we are to * enter the LOCAL_BUSY state. */ l2cap_abort_rx_srej_sent(chan); } l2cap_send_ack(chan); break; case L2CAP_EV_LOCAL_BUSY_CLEAR: BT_DBG("Exit LOCAL_BUSY"); clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { struct l2cap_ctrl local_control; memset(&local_control, 0, sizeof(local_control)); local_control.sframe = 1; local_control.super = L2CAP_SUPER_RR; local_control.poll = 1; local_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &local_control); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; } break; case L2CAP_EV_RECV_REQSEQ_AND_FBIT: l2cap_process_reqseq(chan, control->reqseq); break; case L2CAP_EV_EXPLICIT_POLL: l2cap_send_rr_or_rnr(chan, 1); chan->retry_count = 1; __set_monitor_timer(chan); __clear_ack_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; break; case L2CAP_EV_RETRANS_TO: l2cap_send_rr_or_rnr(chan, 1); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; break; case L2CAP_EV_RECV_FBIT: /* Nothing to process */ break; default: break; } } static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, event); switch (event) { case L2CAP_EV_DATA_REQUEST: if (chan->tx_send_head == NULL) chan->tx_send_head = skb_peek(skbs); /* Queue data, but don't send. 
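 *
 * In WAIT_F the transmitter is waiting for a frame with the F-bit set, so
 * new SDUs are only queued here.  A hedged sketch of the surrounding state
 * flow, as implemented by l2cap_tx_state_xmit() above and the cases below:
 *
 *	XMIT:   retransmission timeout  -> send RR/RNR with P=1, enter WAIT_F
 *	WAIT_F: RECV_FBIT, final set    -> back to XMIT; queued frames go out
 *	                                   on the next l2cap_ertm_send() call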
*/ skb_queue_splice_tail_init(skbs, &chan->tx_q); break; case L2CAP_EV_LOCAL_BUSY_DETECTED: BT_DBG("Enter LOCAL_BUSY"); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { /* The SREJ_SENT state must be aborted if we are to * enter the LOCAL_BUSY state. */ l2cap_abort_rx_srej_sent(chan); } l2cap_send_ack(chan); break; case L2CAP_EV_LOCAL_BUSY_CLEAR: BT_DBG("Exit LOCAL_BUSY"); clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { struct l2cap_ctrl local_control; memset(&local_control, 0, sizeof(local_control)); local_control.sframe = 1; local_control.super = L2CAP_SUPER_RR; local_control.poll = 1; local_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &local_control); chan->retry_count = 1; __set_monitor_timer(chan); chan->tx_state = L2CAP_TX_STATE_WAIT_F; } break; case L2CAP_EV_RECV_REQSEQ_AND_FBIT: l2cap_process_reqseq(chan, control->reqseq); fallthrough; case L2CAP_EV_RECV_FBIT: if (control && control->final) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) __set_retrans_timer(chan); chan->retry_count = 0; chan->tx_state = L2CAP_TX_STATE_XMIT; BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state); } break; case L2CAP_EV_EXPLICIT_POLL: /* Ignore */ break; case L2CAP_EV_MONITOR_TO: if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { l2cap_send_rr_or_rnr(chan, 1); __set_monitor_timer(chan); chan->retry_count++; } else { l2cap_send_disconn_req(chan, ECONNABORTED); } break; default: break; } } static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff_head *skbs, u8 event) { BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", chan, control, skbs, event, chan->tx_state); switch (chan->tx_state) { case L2CAP_TX_STATE_XMIT: l2cap_tx_state_xmit(chan, control, skbs, event); break; case L2CAP_TX_STATE_WAIT_F: l2cap_tx_state_wait_f(chan, control, skbs, event); break; default: /* Ignore event */ break; } } static void l2cap_pass_to_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); } static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, struct l2cap_ctrl *control) { BT_DBG("chan %p, control %p", chan, control); l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); } /* Copy frame to all raw sockets on that connection */ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) { struct sk_buff *nskb; struct l2cap_chan *chan; BT_DBG("conn %p", conn); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { if (chan->chan_type != L2CAP_CHAN_RAW) continue; /* Don't send frame to the channel it came from */ if (bt_cb(skb)->l2cap.chan == chan) continue; nskb = skb_clone(skb, GFP_KERNEL); if (!nskb) continue; if (chan->ops->recv(chan, nskb)) kfree_skb(nskb); } mutex_unlock(&conn->chan_lock); } /* ---- L2CAP signalling commands ---- */ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data) { struct sk_buff *skb, **frag; struct l2cap_cmd_hdr *cmd; struct l2cap_hdr *lh; int len, count; BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", conn, code, ident, dlen); if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) return NULL; len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; count = min_t(unsigned int, conn->mtu, len); skb = bt_skb_alloc(count, GFP_KERNEL); if (!skb) return NULL; lh = skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE 
+ dlen); if (conn->hcon->type == LE_LINK) lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); else lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); cmd->code = code; cmd->ident = ident; cmd->len = cpu_to_le16(dlen); if (dlen) { count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; skb_put_data(skb, data, count); data += count; } len -= skb->len; /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { count = min_t(unsigned int, conn->mtu, len); *frag = bt_skb_alloc(count, GFP_KERNEL); if (!*frag) goto fail; skb_put_data(*frag, data, count); len -= count; data += count; frag = &(*frag)->next; } return skb; fail: kfree_skb(skb); return NULL; } static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val) { struct l2cap_conf_opt *opt = *ptr; int len; len = L2CAP_CONF_OPT_SIZE + opt->len; *ptr += len; *type = opt->type; *olen = opt->len; switch (opt->len) { case 1: *val = *((u8 *) opt->val); break; case 2: *val = get_unaligned_le16(opt->val); break; case 4: *val = get_unaligned_le32(opt->val); break; default: *val = (unsigned long) opt->val; break; } BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); return len; } static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) { struct l2cap_conf_opt *opt = *ptr; BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val); if (size < L2CAP_CONF_OPT_SIZE + len) return; opt->type = type; opt->len = len; switch (len) { case 1: *((u8 *) opt->val) = val; break; case 2: put_unaligned_le16(val, opt->val); break; case 4: put_unaligned_le32(val, opt->val); break; default: memcpy(opt->val, (void *) val, len); break; } *ptr += L2CAP_CONF_OPT_SIZE + len; } static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size) { struct l2cap_conf_efs efs; switch (chan->mode) { case L2CAP_MODE_ERTM: efs.id = chan->local_id; efs.stype = chan->local_stype; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); break; case L2CAP_MODE_STREAMING: efs.id = 1; efs.stype = L2CAP_SERV_BESTEFFORT; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); efs.acc_lat = 0; efs.flush_to = 0; break; default: return; } l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, size); } static void l2cap_ack_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, ack_timer.work); u16 frames_to_ack; BT_DBG("chan %p", chan); l2cap_chan_lock(chan); frames_to_ack = __seq_offset(chan, chan->buffer_seq, chan->last_acked_seq); if (frames_to_ack) l2cap_send_rr_or_rnr(chan, 0); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } int l2cap_ertm_init(struct l2cap_chan *chan) { int err; chan->next_tx_seq = 0; chan->expected_tx_seq = 0; chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; chan->frames_sent = 0; chan->last_acked_seq = 0; chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; skb_queue_head_init(&chan->tx_q); chan->local_amp_id = AMP_ID_BREDR; chan->move_id = AMP_ID_BREDR; chan->move_state = L2CAP_MOVE_STABLE; chan->move_role = L2CAP_MOVE_ROLE_NONE; if (chan->mode != L2CAP_MODE_ERTM) return 0; chan->rx_state = L2CAP_RX_STATE_RECV; chan->tx_state = L2CAP_TX_STATE_XMIT; skb_queue_head_init(&chan->srej_q); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); if (err 
< 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		fallthrough;
	default:
		return L2CAP_MODE_BASIC;
	}
}

static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}

static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}

static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}

static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan->conn)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
	chan->ack_win = chan->tx_win;
}

static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}

static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data,
				size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr =
data + data_size; u16 size; BT_DBG("chan %p", chan); if (chan->num_conf_req || chan->num_conf_rsp) goto done; switch (chan->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) break; if (__l2cap_efs_supported(chan->conn)) set_bit(FLAG_EFS_ENABLE, &chan->flags); fallthrough; default: chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; } done: if (chan->imtu != L2CAP_DEFAULT_MTU) { if (!chan->imtu) l2cap_mtu_auto(chan); l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); } switch (chan->mode) { case L2CAP_MODE_BASIC: if (disable_ertm) break; if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) break; rfc.mode = L2CAP_MODE_BASIC; rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; rfc.max_pdu_size = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; case L2CAP_MODE_ERTM: rfc.mode = L2CAP_MODE_ERTM; rfc.max_transmit = chan->max_tx; __l2cap_set_ertm_timeouts(chan, &rfc); size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); l2cap_txwin_setup(chan); rfc.txwin_size = min_t(u16, chan->tx_win, L2CAP_DEFAULT_TX_WINDOW); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) l2cap_add_opt_efs(&ptr, chan, endptr - ptr); if (test_bit(FLAG_EXT_CTRL, &chan->flags)) l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, chan->tx_win, endptr - ptr); if (chan->conn->feat_mask & L2CAP_FEAT_FCS) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, endptr - ptr); } break; case L2CAP_MODE_STREAMING: l2cap_txwin_setup(chan); rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) l2cap_add_opt_efs(&ptr, chan, endptr - ptr); if (chan->conn->feat_mask & L2CAP_FEAT_FCS) if (chan->fcs == L2CAP_FCS_NONE || test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs, endptr - ptr); } break; } req->dcid = cpu_to_le16(chan->dcid); req->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; void *endptr = data + data_size; void *req = chan->conf_req; int len = chan->conf_len; int type, hint, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; u8 remote_efs = 0; u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; u16 size; BT_DBG("chan %p", chan); while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&req, &type, &olen, &val); if (len < 0) break; hint = type & L2CAP_CONF_HINT; type &= L2CAP_CONF_MASK; switch (type) { case L2CAP_CONF_MTU: if (olen != 2) break; mtu = val; break; case L2CAP_CONF_FLUSH_TO: if (olen != 2) break; chan->flush_to = val; break; case 
L2CAP_CONF_QOS: break; case L2CAP_CONF_RFC: if (olen != sizeof(rfc)) break; memcpy(&rfc, (void *) val, olen); break; case L2CAP_CONF_FCS: if (olen != 1) break; if (val == L2CAP_FCS_NONE) set_bit(CONF_RECV_NO_FCS, &chan->conf_state); break; case L2CAP_CONF_EFS: if (olen != sizeof(efs)) break; remote_efs = 1; memcpy(&efs, (void *) val, olen); break; case L2CAP_CONF_EWS: if (olen != 2) break; if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) return -ECONNREFUSED; set_bit(FLAG_EXT_CTRL, &chan->flags); set_bit(CONF_EWS_RECV, &chan->conf_state); chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; chan->remote_tx_win = val; break; default: if (hint) break; result = L2CAP_CONF_UNKNOWN; l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); break; } } if (chan->num_conf_rsp || chan->num_conf_req > 1) goto done; switch (chan->mode) { case L2CAP_MODE_STREAMING: case L2CAP_MODE_ERTM: if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; } if (remote_efs) { if (__l2cap_efs_supported(chan->conn)) set_bit(FLAG_EFS_ENABLE, &chan->flags); else return -ECONNREFUSED; } if (chan->mode != rfc.mode) return -ECONNREFUSED; break; } done: if (chan->mode != rfc.mode) { result = L2CAP_CONF_UNACCEPT; rfc.mode = chan->mode; if (chan->num_conf_rsp == 1) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); } if (result == L2CAP_CONF_SUCCESS) { /* Configure output options and let the other side know * which ones we don't like. */ if (mtu < L2CAP_DEFAULT_MIN_MTU) result = L2CAP_CONF_UNACCEPT; else { chan->omtu = mtu; set_bit(CONF_MTU_DONE, &chan->conf_state); } l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); if (remote_efs) { if (chan->local_stype != L2CAP_SERV_NOTRAFIC && efs.stype != L2CAP_SERV_NOTRAFIC && efs.stype != chan->local_stype) { result = L2CAP_CONF_UNACCEPT; if (chan->num_conf_req >= 1) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); } else { /* Send PENDING Conf Rsp */ result = L2CAP_CONF_PENDING; set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); } } switch (rfc.mode) { case L2CAP_MODE_BASIC: chan->fcs = L2CAP_FCS_NONE; set_bit(CONF_MODE_DONE, &chan->conf_state); break; case L2CAP_MODE_ERTM: if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) chan->remote_tx_win = rfc.txwin_size; else rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; chan->remote_max_tx = rfc.max_transmit; size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); chan->remote_mps = size; __l2cap_set_ertm_timeouts(chan, &rfc); set_bit(CONF_MODE_DONE, &chan->conf_state); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); if (remote_efs && test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->remote_id = efs.id; chan->remote_stype = efs.stype; chan->remote_msdu = le16_to_cpu(efs.msdu); chan->remote_flush_to = le32_to_cpu(efs.flush_to); chan->remote_acc_lat = le32_to_cpu(efs.acc_lat); chan->remote_sdu_itime = le32_to_cpu(efs.sdu_itime); l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); } break; case L2CAP_MODE_STREAMING: size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), chan->conn->mtu - L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); rfc.max_pdu_size = cpu_to_le16(size); chan->remote_mps = size; set_bit(CONF_MODE_DONE, &chan->conf_state); 
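/* Echo the (possibly clamped) RFC option back in the response so the
			 * remote side learns the streaming-mode MPS we are actually
			 * willing to accept.
			 */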
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; default: result = L2CAP_CONF_UNACCEPT; memset(&rfc, 0, sizeof(rfc)); rfc.mode = chan->mode; } if (result == L2CAP_CONF_SUCCESS) set_bit(CONF_OUTPUT_DONE, &chan->conf_state); } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); rsp->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, size_t size, u16 *result) { struct l2cap_conf_req *req = data; void *ptr = req->data; void *endptr = data + size; int type, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; struct l2cap_conf_efs efs; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); while (len >= L2CAP_CONF_OPT_SIZE) { len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); if (len < 0) break; switch (type) { case L2CAP_CONF_MTU: if (olen != 2) break; if (val < L2CAP_DEFAULT_MIN_MTU) { *result = L2CAP_CONF_UNACCEPT; chan->imtu = L2CAP_DEFAULT_MIN_MTU; } else chan->imtu = val; l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr); break; case L2CAP_CONF_FLUSH_TO: if (olen != 2) break; chan->flush_to = val; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, chan->flush_to, endptr - ptr); break; case L2CAP_CONF_RFC: if (olen != sizeof(rfc)) break; memcpy(&rfc, (void *)val, olen); if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && rfc.mode != chan->mode) return -ECONNREFUSED; chan->fcs = 0; l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc, endptr - ptr); break; case L2CAP_CONF_EWS: if (olen != 2) break; chan->ack_win = min_t(u16, val, chan->ack_win); l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, chan->tx_win, endptr - ptr); break; case L2CAP_CONF_EFS: if (olen != sizeof(efs)) break; memcpy(&efs, (void *)val, olen); if (chan->local_stype != L2CAP_SERV_NOTRAFIC && efs.stype != L2CAP_SERV_NOTRAFIC && efs.stype != chan->local_stype) return -ECONNREFUSED; l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), (unsigned long) &efs, endptr - ptr); break; case L2CAP_CONF_FCS: if (olen != 1) break; if (*result == L2CAP_CONF_PENDING) if (val == L2CAP_FCS_NONE) set_bit(CONF_RECV_NO_FCS, &chan->conf_state); break; } } if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) return -ECONNREFUSED; chan->mode = rfc.mode; if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { switch (rfc.mode) { case L2CAP_MODE_ERTM: chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); chan->mps = le16_to_cpu(rfc.max_pdu_size); if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) chan->ack_win = min_t(u16, chan->ack_win, rfc.txwin_size); if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { chan->local_msdu = le16_to_cpu(efs.msdu); chan->local_sdu_itime = le32_to_cpu(efs.sdu_itime); chan->local_acc_lat = le32_to_cpu(efs.acc_lat); chan->local_flush_to = le32_to_cpu(efs.flush_to); } break; case L2CAP_MODE_STREAMING: chan->mps = le16_to_cpu(rfc.max_pdu_size); } } req->dcid = cpu_to_le16(chan->dcid); req->flags = cpu_to_le16(0); return ptr - data; } static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags) { struct l2cap_conf_rsp *rsp = data; void *ptr = rsp->data; BT_DBG("chan %p", chan); rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); rsp->flags = cpu_to_le16(flags); return ptr - data; } void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) { struct l2cap_le_conn_rsp rsp; 
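/* Deferred LE connect response: once the channel has been accepted,
	 * echo our local MTU, MPS and initial rx credits back to the
	 * initiator in an L2CAP_LE_CONN_RSP.
	 */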
struct l2cap_conn *conn = chan->conn; BT_DBG("chan %p", chan); rsp.dcid = cpu_to_le16(chan->scid); rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); rsp.credits = cpu_to_le16(chan->rx_credits); rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); } static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) { int *result = data; if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; switch (chan->state) { case BT_CONNECT2: /* If channel still pending accept add to result */ (*result)++; return; case BT_CONNECTED: return; default: /* If not connected or pending accept it has been refused */ *result = -ECONNREFUSED; return; } } struct l2cap_ecred_rsp_data { struct { struct l2cap_ecred_conn_rsp rsp; __le16 scid[L2CAP_ECRED_MAX_CID]; } __packed pdu; int count; }; static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) { struct l2cap_ecred_rsp_data *rsp = data; if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) return; /* Reset ident so only one response is sent */ chan->ident = 0; /* Include all channels pending with the same ident */ if (!rsp->pdu.rsp.result) rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid); else l2cap_chan_del(chan, ECONNRESET); } void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_ecred_rsp_data data; u16 id = chan->ident; int result = 0; if (!id) return; BT_DBG("chan %p id %d", chan, id); memset(&data, 0, sizeof(data)); data.pdu.rsp.mtu = cpu_to_le16(chan->imtu); data.pdu.rsp.mps = cpu_to_le16(chan->mps); data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits); data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); /* Verify that all channels are ready */ __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result); if (result > 0) return; if (result < 0) data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION); /* Build response */ __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data); l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP, sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)), &data.pdu); } void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) { struct l2cap_conn_rsp rsp; struct l2cap_conn *conn = chan->conn; u8 buf[128]; u8 rsp_code; rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); if (chan->hs_hcon) rsp_code = L2CAP_CREATE_CHAN_RSP; else rsp_code = L2CAP_CONN_RSP; BT_DBG("chan %p rsp_code %u", chan, rsp_code); l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp); if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) return; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) { int type, olen; unsigned long val; /* Use sane default values in case a misbehaving remote device * did not send an RFC or extended window size option. 
 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}

static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;
	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, in order to
	 * avoid keeping the ACL alive for too long after L2CAP
	 * disconnection, reset the ACL disc_timeout back to
	 * HCI_DISCONN_TIMEOUT during L2CAP connect.
*/ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; chan->local_amp_id = amp_id; __l2cap_chan_add(conn, chan); dcid = chan->scid; __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { if (l2cap_chan_check_security(chan, false)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { /* Force pending result for AMP controllers. * The connection will succeed after the * physical link is up. */ if (amp_id == AMP_ID_BREDR) { l2cap_state_change(chan, BT_CONFIG); result = L2CAP_CR_SUCCESS; } else { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; } status = L2CAP_CS_NO_INFO; } } else { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_AUTHEN_PEND; } } else { l2cap_state_change(chan, BT_CONNECT2); result = L2CAP_CR_PEND; status = L2CAP_CS_NO_INFO; } response: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); sendresp: rsp.scid = cpu_to_le16(scid); rsp.dcid = cpu_to_le16(dcid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(status); l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(info), &info); } if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && result == L2CAP_CR_SUCCESS) { u8 buf[128]; set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } return chan; } static int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct hci_dev *hdev = conn->hcon->hdev; struct hci_conn *hcon = conn->hcon; if (cmd_len < sizeof(struct l2cap_conn_req)) return -EPROTO; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT) && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) mgmt_device_connected(hdev, hcon, NULL, 0); hci_dev_unlock(hdev); l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); return 0; } static int l2cap_connect_create_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; u16 scid, dcid, result, status; struct l2cap_chan *chan; u8 req[128]; int err; if (cmd_len < sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); result = __le16_to_cpu(rsp->result); status = __le16_to_cpu(rsp->status); if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_DYN_END)) return -EPROTO; BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); mutex_lock(&conn->chan_lock); if (scid) { chan = __l2cap_get_chan_by_scid(conn, scid); if (!chan) { err = -EBADSLT; goto unlock; } } else { chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) { err = -EBADSLT; goto unlock; } } chan = 
l2cap_chan_hold_unless_zero(chan); if (!chan) { err = -EBADSLT; goto unlock; } err = 0; l2cap_chan_lock(chan); switch (result) { case L2CAP_CR_SUCCESS: if (__l2cap_get_chan_by_dcid(conn, dcid)) { err = -EBADSLT; break; } l2cap_state_change(chan, BT_CONFIG); chan->ident = 0; chan->dcid = dcid; clear_bit(CONF_CONNECT_PEND, &chan->conf_state); if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) break; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, req, sizeof(req)), req); chan->num_conf_req++; break; case L2CAP_CR_PEND: set_bit(CONF_CONNECT_PEND, &chan->conf_state); break; default: l2cap_chan_del(chan, ECONNREFUSED); break; } l2cap_chan_unlock(chan); l2cap_chan_put(chan); unlock: mutex_unlock(&conn->chan_lock); return err; } static inline void set_default_fcs(struct l2cap_chan *chan) { /* FCS is enabled only in ERTM or streaming mode, if one or both * sides request it. */ if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) chan->fcs = L2CAP_FCS_NONE; else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) chan->fcs = L2CAP_FCS_CRC16; } static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, u8 ident, u16 flags) { struct l2cap_conn *conn = chan->conn; BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, flags); clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); set_bit(CONF_OUTPUT_DONE, &chan->conf_state); l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags), data); } static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, u16 scid, u16 dcid) { struct l2cap_cmd_rej_cid rej; rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); rej.scid = __cpu_to_le16(scid); rej.dcid = __cpu_to_le16(dcid); l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; u16 dcid, flags; u8 rsp[64]; struct l2cap_chan *chan; int len, err = 0; if (cmd_len < sizeof(*req)) return -EPROTO; dcid = __le16_to_cpu(req->dcid); flags = __le16_to_cpu(req->flags); BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); chan = l2cap_get_chan_by_scid(conn, dcid); if (!chan) { cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0); return 0; } if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; } /* Reject if config buffer is too small. */ len = cmd_len - sizeof(*req); if (chan->conf_len + len > sizeof(chan->conf_req)) { l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_REJECT, flags), rsp); goto unlock; } /* Store config. */ memcpy(chan->conf_req + chan->conf_len, req->data, len); chan->conf_len += len; if (flags & L2CAP_CONF_FLAG_CONTINUATION) { /* Incomplete config. Send empty response. */ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_SUCCESS, flags), rsp); goto unlock; } /* Complete config. */ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp)); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto unlock; } chan->ident = cmd->ident; l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) chan->num_conf_rsp++; /* Reset config buffer. 
*/ chan->conf_len = 0; if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) goto unlock; if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); if (err < 0) l2cap_send_disconn_req(chan, -err); else l2cap_chan_ready(chan); goto unlock; } if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { u8 buf[64]; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } /* Got Conf Rsp PENDING from remote side and assume we sent Conf Rsp PENDING in the code above */ if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { /* check compatibility */ /* Send rsp for BR/EDR channel */ if (!chan->hs_hcon) l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); else chan->ident = cmd->ident; } unlock: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return err; } static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; u16 scid, flags, result; struct l2cap_chan *chan; int len = cmd_len - sizeof(*rsp); int err = 0; if (cmd_len < sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); flags = __le16_to_cpu(rsp->flags); result = __le16_to_cpu(rsp->result); BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, result, len); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) return 0; switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); break; case L2CAP_CONF_PENDING: set_bit(CONF_REM_CONF_PEND, &chan->conf_state); if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { char buf[64]; len = l2cap_parse_conf_rsp(chan, rsp->data, len, buf, sizeof(buf), &result); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } if (!chan->hs_hcon) { l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0); } else { if (l2cap_check_efs(chan)) { amp_create_logical_link(chan); chan->ident = cmd->ident; } } } goto done; case L2CAP_CONF_UNKNOWN: case L2CAP_CONF_UNACCEPT: if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { char req[64]; if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } /* throw out any old stored conf requests */ result = L2CAP_CONF_SUCCESS; len = l2cap_parse_conf_rsp(chan, rsp->data, len, req, sizeof(req), &result); if (len < 0) { l2cap_send_disconn_req(chan, ECONNRESET); goto done; } l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, len, req); chan->num_conf_req++; if (result != L2CAP_CONF_SUCCESS) goto done; break; } fallthrough; default: l2cap_chan_set_err(chan, ECONNRESET); __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); l2cap_send_disconn_req(chan, ECONNRESET); goto done; } if (flags & L2CAP_CONF_FLAG_CONTINUATION) goto done; set_bit(CONF_INPUT_DONE, &chan->conf_state); if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); if (err < 0) l2cap_send_disconn_req(chan, -err); else l2cap_chan_ready(chan); } done: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return err; } static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) 
data; struct l2cap_disconn_rsp rsp; u16 dcid, scid; struct l2cap_chan *chan; if (cmd_len != sizeof(*req)) return -EPROTO; scid = __le16_to_cpu(req->scid); dcid = __le16_to_cpu(req->dcid); BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); chan = l2cap_get_chan_by_scid(conn, dcid); if (!chan) { cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); return 0; } rsp.dcid = cpu_to_le16(chan->scid); rsp.scid = cpu_to_le16(chan->dcid); l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); chan->ops->set_shutdown(chan); l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); mutex_unlock(&conn->chan_lock); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; u16 dcid, scid; struct l2cap_chan *chan; if (cmd_len != sizeof(*rsp)) return -EPROTO; scid = __le16_to_cpu(rsp->scid); dcid = __le16_to_cpu(rsp->dcid); BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) { return 0; } if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); l2cap_chan_del(chan, 0); mutex_unlock(&conn->chan_lock); chan->ops->close(chan); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_info_req *req = (struct l2cap_info_req *) data; u16 type; if (cmd_len != sizeof(*req)) return -EPROTO; type = __le16_to_cpu(req->type); BT_DBG("type 0x%4.4x", type); if (type == L2CAP_IT_FEAT_MASK) { u8 buf[8]; u32 feat_mask = l2cap_feat_mask; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; if (conn->local_fixed_chan & L2CAP_FC_A2MP) feat_mask |= L2CAP_FEAT_EXT_FLOW | L2CAP_FEAT_EXT_WINDOW; put_unaligned_le32(feat_mask, rsp->data); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else if (type == L2CAP_IT_FIXED_CHAN) { u8 buf[12]; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); rsp->data[0] = conn->local_fixed_chan; memset(rsp->data + 1, 0, 7); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { struct l2cap_info_rsp rsp; rsp.type = cpu_to_le16(type); rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); } return 0; } static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; u16 type, result; if (cmd_len < sizeof(*rsp)) return -EPROTO; type = __le16_to_cpu(rsp->type); result = __le16_to_cpu(rsp->result); BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); /* L2CAP Info req/rsp are unbound to channels, add extra checks */ if (cmd->ident != conn->info_ident || conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) return 0; cancel_delayed_work(&conn->info_timer); if (result != L2CAP_IR_SUCCESS) { conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 
0; l2cap_conn_start(conn); return 0; } switch (type) { case L2CAP_IT_FEAT_MASK: conn->feat_mask = get_unaligned_le32(rsp->data); if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { struct l2cap_info_req req; req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); conn->info_ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } else { conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } break; case L2CAP_IT_FIXED_CHAN: conn->remote_fixed_chan = rsp->data[0]; conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); break; } return 0; } static int l2cap_create_channel_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) { struct l2cap_create_chan_req *req = data; struct l2cap_create_chan_rsp rsp; struct l2cap_chan *chan; struct hci_dev *hdev; u16 psm, scid; if (cmd_len != sizeof(*req)) return -EPROTO; if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return -EINVAL; psm = le16_to_cpu(req->psm); scid = le16_to_cpu(req->scid); BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); /* For controller id 0 make BR/EDR connection */ if (req->amp_id == AMP_ID_BREDR) { l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, req->amp_id); return 0; } /* Validate AMP controller id */ hdev = hci_dev_get(req->amp_id); if (!hdev) goto error; if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) { hci_dev_put(hdev); goto error; } chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, req->amp_id); if (chan) { struct amp_mgr *mgr = conn->hcon->amp_mgr; struct hci_conn *hs_hcon; hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, &conn->hcon->dst); if (!hs_hcon) { hci_dev_put(hdev); cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); return 0; } BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); mgr->bredr_chan = chan; chan->hs_hcon = hs_hcon; chan->fcs = L2CAP_FCS_NONE; conn->mtu = hdev->block_mtu; } hci_dev_put(hdev); return 0; error: rsp.dcid = 0; rsp.scid = cpu_to_le16(scid); rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, sizeof(rsp), &rsp); return 0; } static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id) { struct l2cap_move_chan_req req; u8 ident; BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id); ident = l2cap_get_ident(chan->conn); chan->ident = ident; req.icid = cpu_to_le16(chan->scid); req.dest_amp_id = dest_amp_id; l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req); __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); } static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result) { struct l2cap_move_chan_rsp rsp; BT_DBG("chan %p, result 0x%4.4x", chan, result); rsp.icid = cpu_to_le16(chan->dcid); rsp.result = cpu_to_le16(result); l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); } static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result) { struct l2cap_move_chan_cfm cfm; BT_DBG("chan %p, result 0x%4.4x", chan, result); chan->ident = l2cap_get_ident(chan->conn); cfm.icid = cpu_to_le16(chan->scid); cfm.result = cpu_to_le16(result); l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); } static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid) { struct l2cap_move_chan_cfm cfm; BT_DBG("conn %p, icid 0x%4.4x", conn, 
icid); cfm.icid = cpu_to_le16(icid); cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); } static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, u16 icid) { struct l2cap_move_chan_cfm_rsp rsp; BT_DBG("icid 0x%4.4x", icid); rsp.icid = cpu_to_le16(icid); l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); } static void __release_logical_link(struct l2cap_chan *chan) { chan->hs_hchan = NULL; chan->hs_hcon = NULL; /* Placeholder - release the logical link */ } static void l2cap_logical_fail(struct l2cap_chan *chan) { /* Logical link setup failed */ if (chan->state != BT_CONNECTED) { /* Create channel failure, disconnect */ l2cap_send_disconn_req(chan, ECONNRESET); return; } switch (chan->move_role) { case L2CAP_MOVE_ROLE_RESPONDER: l2cap_move_done(chan); l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP); break; case L2CAP_MOVE_ROLE_INITIATOR: if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP || chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) { /* Remote has only sent pending or * success responses, clean up */ l2cap_move_done(chan); } /* Other amp move states imply that the move * has already aborted */ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); break; } } static void l2cap_logical_finish_create(struct l2cap_chan *chan, struct hci_chan *hchan) { struct l2cap_conf_rsp rsp; chan->hs_hchan = hchan; chan->hs_hcon->l2cap_data = chan->conn; l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0); if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { int err; set_default_fcs(chan); err = l2cap_ertm_init(chan); if (err < 0) l2cap_send_disconn_req(chan, -err); else l2cap_chan_ready(chan); } } static void l2cap_logical_finish_move(struct l2cap_chan *chan, struct hci_chan *hchan) { chan->hs_hcon = hchan->conn; chan->hs_hcon->l2cap_data = chan->conn; BT_DBG("move_state %d", chan->move_state); switch (chan->move_state) { case L2CAP_MOVE_WAIT_LOGICAL_COMP: /* Move confirm will be sent after a success * response is received */ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; break; case L2CAP_MOVE_WAIT_LOGICAL_CFM: if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); } break; default: /* Move was not in expected state, free the channel */ __release_logical_link(chan); chan->move_state = L2CAP_MOVE_STABLE; } } /* Call with chan locked */ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, u8 status) { BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status); if (status) { l2cap_logical_fail(chan); __release_logical_link(chan); return; } if (chan->state != BT_CONNECTED) { /* Ignore logical link if channel is on BR/EDR */ if (chan->local_amp_id != AMP_ID_BREDR) l2cap_logical_finish_create(chan, hchan); } else { l2cap_logical_finish_move(chan, hchan); } } void l2cap_move_start(struct l2cap_chan *chan) { BT_DBG("chan %p", chan); if (chan->local_amp_id == AMP_ID_BREDR) { if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED) return; chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; chan->move_state = L2CAP_MOVE_WAIT_PREPARE; /* Placeholder - start physical link setup */ } else { chan->move_role = 
L2CAP_MOVE_ROLE_INITIATOR; chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; chan->move_id = 0; l2cap_move_setup(chan); l2cap_send_move_chan_req(chan, 0); } } static void l2cap_do_create(struct l2cap_chan *chan, int result, u8 local_amp_id, u8 remote_amp_id) { BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state), local_amp_id, remote_amp_id); chan->fcs = L2CAP_FCS_NONE; /* Outgoing channel on AMP */ if (chan->state == BT_CONNECT) { if (result == L2CAP_CR_SUCCESS) { chan->local_amp_id = local_amp_id; l2cap_send_create_chan_req(chan, remote_amp_id); } else { /* Revert to BR/EDR connect */ l2cap_send_conn_req(chan); } return; } /* Incoming channel on AMP */ if (__l2cap_no_conn_pending(chan)) { struct l2cap_conn_rsp rsp; char buf[128]; rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); if (result == L2CAP_CR_SUCCESS) { /* Send successful response */ rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } else { /* Send negative response */ rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP, sizeof(rsp), &rsp); if (result == L2CAP_CR_SUCCESS) { l2cap_state_change(chan, BT_CONFIG); set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } } } static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id, u8 remote_amp_id) { l2cap_move_setup(chan); chan->move_id = local_amp_id; chan->move_state = L2CAP_MOVE_WAIT_RSP; l2cap_send_move_chan_req(chan, remote_amp_id); } static void l2cap_do_move_respond(struct l2cap_chan *chan, int result) { struct hci_chan *hchan = NULL; /* Placeholder - get hci_chan for logical link */ if (hchan) { if (hchan->state == BT_CONNECTED) { /* Logical link is ready to go */ chan->hs_hcon = hchan->conn; chan->hs_hcon->l2cap_data = chan->conn; chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); } else { /* Wait for logical link to be ready */ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; } } else { /* Logical link not available */ l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED); } } static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result) { if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { u8 rsp_result; if (result == -EINVAL) rsp_result = L2CAP_MR_BAD_ID; else rsp_result = L2CAP_MR_NOT_ALLOWED; l2cap_send_move_chan_rsp(chan, rsp_result); } chan->move_role = L2CAP_MOVE_ROLE_NONE; chan->move_state = L2CAP_MOVE_STABLE; /* Restart data transmission */ l2cap_ertm_send(chan); } /* Invoke with locked chan */ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) { u8 local_amp_id = chan->local_amp_id; u8 remote_amp_id = chan->remote_amp_id; BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", chan, result, local_amp_id, remote_amp_id); if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) return; if (chan->state != BT_CONNECTED) { l2cap_do_create(chan, result, local_amp_id, remote_amp_id); } else if (result != L2CAP_MR_SUCCESS) { l2cap_do_move_cancel(chan, result); } else { switch (chan->move_role) { case L2CAP_MOVE_ROLE_INITIATOR: l2cap_do_move_initiate(chan, local_amp_id, remote_amp_id); break; case L2CAP_MOVE_ROLE_RESPONDER: l2cap_do_move_respond(chan, result); break; default: l2cap_do_move_cancel(chan, result); break; } } } 
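/* Handle an incoming L2CAP Move Channel Request (AMP channel move).
 * The channel is looked up by the initiator's CID; the move is only
 * allowed for dynamic CIDs using ERTM or streaming mode, and only if
 * the destination controller id refers to BR/EDR or a powered-up AMP
 * controller. A simultaneous move in the other direction is resolved
 * by comparing BD_ADDRs (the larger address wins); the losing side
 * answers with L2CAP_MR_COLLISION. Otherwise this side becomes the
 * move responder and replies with a Move Channel Response.
 */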
static inline int l2cap_move_channel_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) { struct l2cap_move_chan_req *req = data; struct l2cap_move_chan_rsp rsp; struct l2cap_chan *chan; u16 icid = 0; u16 result = L2CAP_MR_NOT_ALLOWED; if (cmd_len != sizeof(*req)) return -EPROTO; icid = le16_to_cpu(req->icid); BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) return -EINVAL; chan = l2cap_get_chan_by_dcid(conn, icid); if (!chan) { rsp.icid = cpu_to_le16(icid); rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED); l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); return 0; } chan->ident = cmd->ident; if (chan->scid < L2CAP_CID_DYN_START || chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY || (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)) { result = L2CAP_MR_NOT_ALLOWED; goto send_move_response; } if (chan->local_amp_id == req->dest_amp_id) { result = L2CAP_MR_SAME_ID; goto send_move_response; } if (req->dest_amp_id != AMP_ID_BREDR) { struct hci_dev *hdev; hdev = hci_dev_get(req->dest_amp_id); if (!hdev || hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) { if (hdev) hci_dev_put(hdev); result = L2CAP_MR_BAD_ID; goto send_move_response; } hci_dev_put(hdev); } /* Detect a move collision. Only send a collision response * if this side has "lost", otherwise proceed with the move. * The winner has the larger bd_addr. */ if ((__chan_is_moving(chan) || chan->move_role != L2CAP_MOVE_ROLE_NONE) && bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) { result = L2CAP_MR_COLLISION; goto send_move_response; } chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; l2cap_move_setup(chan); chan->move_id = req->dest_amp_id; if (req->dest_amp_id == AMP_ID_BREDR) { /* Moving to BR/EDR */ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; result = L2CAP_MR_PEND; } else { chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; result = L2CAP_MR_SUCCESS; } } else { chan->move_state = L2CAP_MOVE_WAIT_PREPARE; /* Placeholder - uncomment when amp functions are available */ /*amp_accept_physical(chan, req->dest_amp_id);*/ result = L2CAP_MR_PEND; } send_move_response: l2cap_send_move_chan_rsp(chan, result); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result) { struct l2cap_chan *chan; struct hci_chan *hchan = NULL; chan = l2cap_get_chan_by_scid(conn, icid); if (!chan) { l2cap_send_move_chan_cfm_icid(conn, icid); return; } __clear_chan_timer(chan); if (result == L2CAP_MR_PEND) __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT); switch (chan->move_state) { case L2CAP_MOVE_WAIT_LOGICAL_COMP: /* Move confirm will be sent when logical link * is complete. 
*/ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; break; case L2CAP_MOVE_WAIT_RSP_SUCCESS: if (result == L2CAP_MR_PEND) { break; } else if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; } else { /* Logical link is up or moving to BR/EDR, * proceed with move */ chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); } break; case L2CAP_MOVE_WAIT_RSP: /* Moving to AMP */ if (result == L2CAP_MR_SUCCESS) { /* Remote is ready, send confirm immediately * after logical link is ready */ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; } else { /* Both logical link and move success * are required to confirm */ chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP; } /* Placeholder - get hci_chan for logical link */ if (!hchan) { /* Logical link not available */ l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); break; } /* If the logical link is not yet connected, do not * send confirmation. */ if (hchan->state != BT_CONNECTED) break; /* Logical link is already ready to go */ chan->hs_hcon = hchan->conn; chan->hs_hcon->l2cap_data = chan->conn; if (result == L2CAP_MR_SUCCESS) { /* Can confirm now */ l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); } else { /* Now only need move success * to confirm */ chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; } l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); break; default: /* Any other amp move state means the move failed. */ chan->move_id = chan->local_amp_id; l2cap_move_done(chan); l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); } l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, u16 result) { struct l2cap_chan *chan; chan = l2cap_get_chan_by_ident(conn, ident); if (!chan) { /* Could not locate channel, icid is best guess */ l2cap_send_move_chan_cfm_icid(conn, icid); return; } __clear_chan_timer(chan); if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { if (result == L2CAP_MR_COLLISION) { chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; } else { /* Cleanup - cancel move */ chan->move_id = chan->local_amp_id; l2cap_move_done(chan); } } l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static int l2cap_move_channel_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) { struct l2cap_move_chan_rsp *rsp = data; u16 icid, result; if (cmd_len != sizeof(*rsp)) return -EPROTO; icid = le16_to_cpu(rsp->icid); result = le16_to_cpu(rsp->result); BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND) l2cap_move_continue(conn, icid, result); else l2cap_move_fail(conn, cmd->ident, icid, result); return 0; } static int l2cap_move_channel_confirm(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) { struct l2cap_move_chan_cfm *cfm = data; struct l2cap_chan *chan; u16 icid, result; if (cmd_len != sizeof(*cfm)) return -EPROTO; icid = le16_to_cpu(cfm->icid); result = le16_to_cpu(cfm->result); BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); chan = l2cap_get_chan_by_dcid(conn, icid); if (!chan) { /* Spec requires a response even if the icid was not found */ l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); return 0; } if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) { if (result == L2CAP_MC_CONFIRMED) { chan->local_amp_id = chan->move_id; if (chan->local_amp_id == AMP_ID_BREDR) __release_logical_link(chan); } else { chan->move_id = 
chan->local_amp_id; } l2cap_move_done(chan); } l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) { struct l2cap_move_chan_cfm_rsp *rsp = data; struct l2cap_chan *chan; u16 icid; if (cmd_len != sizeof(*rsp)) return -EPROTO; icid = le16_to_cpu(rsp->icid); BT_DBG("icid 0x%4.4x", icid); chan = l2cap_get_chan_by_scid(conn, icid); if (!chan) return 0; __clear_chan_timer(chan); if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) { chan->local_amp_id = chan->move_id; if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan) __release_logical_link(chan); l2cap_move_done(chan); } l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct hci_conn *hcon = conn->hcon; struct l2cap_conn_param_update_req *req; struct l2cap_conn_param_update_rsp rsp; u16 min, max, latency, to_multiplier; int err; if (hcon->role != HCI_ROLE_MASTER) return -EINVAL; if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) return -EPROTO; req = (struct l2cap_conn_param_update_req *) data; min = __le16_to_cpu(req->min); max = __le16_to_cpu(req->max); latency = __le16_to_cpu(req->latency); to_multiplier = __le16_to_cpu(req->to_multiplier); BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", min, max, latency, to_multiplier); memset(&rsp, 0, sizeof(rsp)); err = hci_check_conn_params(min, max, latency, to_multiplier); if (err) rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, sizeof(rsp), &rsp); if (!err) { u8 store_hint; store_hint = hci_le_conn_update(hcon, min, max, latency, to_multiplier); mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type, store_hint, min, max, latency, to_multiplier); } return 0; } static int l2cap_le_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; struct hci_conn *hcon = conn->hcon; u16 dcid, mtu, mps, credits, result; struct l2cap_chan *chan; int err, sec_level; if (cmd_len < sizeof(*rsp)) return -EPROTO; dcid = __le16_to_cpu(rsp->dcid); mtu = __le16_to_cpu(rsp->mtu); mps = __le16_to_cpu(rsp->mps); credits = __le16_to_cpu(rsp->credits); result = __le16_to_cpu(rsp->result); if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 || dcid < L2CAP_CID_DYN_START || dcid > L2CAP_CID_LE_DYN_END)) return -EPROTO; BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", dcid, mtu, mps, credits, result); mutex_lock(&conn->chan_lock); chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) { err = -EBADSLT; goto unlock; } err = 0; l2cap_chan_lock(chan); switch (result) { case L2CAP_CR_LE_SUCCESS: if (__l2cap_get_chan_by_dcid(conn, dcid)) { err = -EBADSLT; break; } chan->ident = 0; chan->dcid = dcid; chan->omtu = mtu; chan->remote_mps = mps; chan->tx_credits = credits; l2cap_chan_ready(chan); break; case L2CAP_CR_LE_AUTHENTICATION: case L2CAP_CR_LE_ENCRYPTION: /* If we already have MITM protection we can't do * anything. 
*/ if (hcon->sec_level > BT_SECURITY_MEDIUM) { l2cap_chan_del(chan, ECONNREFUSED); break; } sec_level = hcon->sec_level + 1; if (chan->sec_level < sec_level) chan->sec_level = sec_level; /* We'll need to send a new Connect Request */ clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags); smp_conn_security(hcon, chan->sec_level); break; default: l2cap_chan_del(chan, ECONNREFUSED); break; } l2cap_chan_unlock(chan); unlock: mutex_unlock(&conn->chan_lock); return err; } static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { int err = 0; switch (cmd->code) { case L2CAP_COMMAND_REJ: l2cap_command_rej(conn, cmd, cmd_len, data); break; case L2CAP_CONN_REQ: err = l2cap_connect_req(conn, cmd, cmd_len, data); break; case L2CAP_CONN_RSP: case L2CAP_CREATE_CHAN_RSP: l2cap_connect_create_rsp(conn, cmd, cmd_len, data); break; case L2CAP_CONF_REQ: err = l2cap_config_req(conn, cmd, cmd_len, data); break; case L2CAP_CONF_RSP: l2cap_config_rsp(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_REQ: err = l2cap_disconnect_req(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_RSP: l2cap_disconnect_rsp(conn, cmd, cmd_len, data); break; case L2CAP_ECHO_REQ: l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); break; case L2CAP_ECHO_RSP: break; case L2CAP_INFO_REQ: err = l2cap_information_req(conn, cmd, cmd_len, data); break; case L2CAP_INFO_RSP: l2cap_information_rsp(conn, cmd, cmd_len, data); break; case L2CAP_CREATE_CHAN_REQ: err = l2cap_create_channel_req(conn, cmd, cmd_len, data); break; case L2CAP_MOVE_CHAN_REQ: err = l2cap_move_channel_req(conn, cmd, cmd_len, data); break; case L2CAP_MOVE_CHAN_RSP: l2cap_move_channel_rsp(conn, cmd, cmd_len, data); break; case L2CAP_MOVE_CHAN_CFM: err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); break; case L2CAP_MOVE_CHAN_CFM_RSP: l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); break; default: BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); err = -EINVAL; break; } return err; } static int l2cap_le_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; struct l2cap_le_conn_rsp rsp; struct l2cap_chan *chan, *pchan; u16 dcid, scid, credits, mtu, mps; __le16 psm; u8 result; if (cmd_len != sizeof(*req)) return -EPROTO; scid = __le16_to_cpu(req->scid); mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); psm = req->psm; dcid = 0; credits = 0; if (mtu < 23 || mps < 23) return -EPROTO; BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), scid, mtu, mps); /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A * page 1059: * * Valid range: 0x0001-0x00ff * * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges */ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { result = L2CAP_CR_LE_BAD_PSM; chan = NULL; goto response; } /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); if (!pchan) { result = L2CAP_CR_LE_BAD_PSM; chan = NULL; goto response; } mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { result = L2CAP_CR_LE_AUTHENTICATION; chan = NULL; goto response_unlock; } /* Check for valid dynamic CID range */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { result = L2CAP_CR_LE_INVALID_SCID; chan = NULL; goto response_unlock; } /* Check if we already have 
channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { result = L2CAP_CR_LE_SCID_IN_USE; chan = NULL; goto response_unlock; } chan = pchan->ops->new_connection(pchan); if (!chan) { result = L2CAP_CR_LE_NO_MEM; goto response_unlock; } bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; chan->omtu = mtu; chan->remote_mps = mps; __l2cap_chan_add(conn, chan); l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits)); dcid = chan->scid; credits = chan->rx_credits; __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); /* The following result value is actually not defined * for LE CoC but we use it to let the function know * that it should bail out after doing its cleanup * instead of sending a response. */ result = L2CAP_CR_PEND; chan->ops->defer(chan); } else { l2cap_chan_ready(chan); result = L2CAP_CR_LE_SUCCESS; } response_unlock: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) return 0; response: if (chan) { rsp.mtu = cpu_to_le16(chan->imtu); rsp.mps = cpu_to_le16(chan->mps); } else { rsp.mtu = 0; rsp.mps = 0; } rsp.dcid = cpu_to_le16(dcid); rsp.credits = cpu_to_le16(credits); rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); return 0; } static inline int l2cap_le_credits(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_le_credits *pkt; struct l2cap_chan *chan; u16 cid, credits, max_credits; if (cmd_len != sizeof(*pkt)) return -EPROTO; pkt = (struct l2cap_le_credits *) data; cid = __le16_to_cpu(pkt->cid); credits = __le16_to_cpu(pkt->credits); BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); chan = l2cap_get_chan_by_dcid(conn, cid); if (!chan) return -EBADSLT; max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; if (credits > max_credits) { BT_ERR("LE credits overflow"); l2cap_send_disconn_req(chan, ECONNRESET); /* Return 0 so that we don't trigger an unnecessary * command reject packet. 
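		 * The disconnect request queued just above already reports the
		 * failure to the remote side.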
*/ goto unlock; } chan->tx_credits += credits; /* Resume sending */ l2cap_le_flowctl_send(chan); if (chan->tx_credits) chan->ops->resume(chan); unlock: l2cap_chan_unlock(chan); l2cap_chan_put(chan); return 0; } static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_conn_req *req = (void *) data; struct { struct l2cap_ecred_conn_rsp rsp; __le16 dcid[L2CAP_ECRED_MAX_CID]; } __packed pdu; struct l2cap_chan *chan, *pchan; u16 mtu, mps; __le16 psm; u8 result, len = 0; int i, num_scid; bool defer = false; if (!enable_ecred) return -EINVAL; if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { result = L2CAP_CR_LE_INVALID_PARAMS; goto response; } cmd_len -= sizeof(*req); num_scid = cmd_len / sizeof(u16); if (num_scid > ARRAY_SIZE(pdu.dcid)) { result = L2CAP_CR_LE_INVALID_PARAMS; goto response; } mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { result = L2CAP_CR_LE_UNACCEPT_PARAMS; goto response; } psm = req->psm; /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A * page 1059: * * Valid range: 0x0001-0x00ff * * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges */ if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { result = L2CAP_CR_LE_BAD_PSM; goto response; } BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); memset(&pdu, 0, sizeof(pdu)); /* Check if we have socket listening on psm */ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, &conn->hcon->dst, LE_LINK); if (!pchan) { result = L2CAP_CR_LE_BAD_PSM; goto response; } mutex_lock(&conn->chan_lock); l2cap_chan_lock(pchan); if (!smp_sufficient_security(conn->hcon, pchan->sec_level, SMP_ALLOW_STK)) { result = L2CAP_CR_LE_AUTHENTICATION; goto unlock; } result = L2CAP_CR_LE_SUCCESS; for (i = 0; i < num_scid; i++) { u16 scid = __le16_to_cpu(req->scid[i]); BT_DBG("scid[%d] 0x%4.4x", i, scid); pdu.dcid[i] = 0x0000; len += sizeof(*pdu.dcid); /* Check for valid dynamic CID range */ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { result = L2CAP_CR_LE_INVALID_SCID; continue; } /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(conn, scid)) { result = L2CAP_CR_LE_SCID_IN_USE; continue; } chan = pchan->ops->new_connection(pchan); if (!chan) { result = L2CAP_CR_LE_NO_MEM; continue; } bacpy(&chan->src, &conn->hcon->src); bacpy(&chan->dst, &conn->hcon->dst); chan->src_type = bdaddr_src_type(conn->hcon); chan->dst_type = bdaddr_dst_type(conn->hcon); chan->psm = psm; chan->dcid = scid; chan->omtu = mtu; chan->remote_mps = mps; __l2cap_chan_add(conn, chan); l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); /* Init response */ if (!pdu.rsp.credits) { pdu.rsp.mtu = cpu_to_le16(chan->imtu); pdu.rsp.mps = cpu_to_le16(chan->mps); pdu.rsp.credits = cpu_to_le16(chan->rx_credits); } pdu.dcid[i] = cpu_to_le16(chan->scid); __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); chan->ident = cmd->ident; chan->mode = L2CAP_MODE_EXT_FLOWCTL; if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { l2cap_state_change(chan, BT_CONNECT2); defer = true; chan->ops->defer(chan); } else { l2cap_chan_ready(chan); } } unlock: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(pchan); response: pdu.rsp.result = cpu_to_le16(result); if (defer) return 0; l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, sizeof(pdu.rsp) + len, &pdu); return 0; } static inline int l2cap_ecred_conn_rsp(struct 
l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_conn_rsp *rsp = (void *) data; struct hci_conn *hcon = conn->hcon; u16 mtu, mps, credits, result; struct l2cap_chan *chan, *tmp; int err = 0, sec_level; int i = 0; if (cmd_len < sizeof(*rsp)) return -EPROTO; mtu = __le16_to_cpu(rsp->mtu); mps = __le16_to_cpu(rsp->mps); credits = __le16_to_cpu(rsp->credits); result = __le16_to_cpu(rsp->result); BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, result); mutex_lock(&conn->chan_lock); cmd_len -= sizeof(*rsp); list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { u16 dcid; if (chan->ident != cmd->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state == BT_CONNECTED) continue; l2cap_chan_lock(chan); /* Check that there is a dcid for each pending channel */ if (cmd_len < sizeof(dcid)) { l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); continue; } dcid = __le16_to_cpu(rsp->dcid[i++]); cmd_len -= sizeof(u16); BT_DBG("dcid[%d] 0x%4.4x", i, dcid); /* Check if dcid is already in use */ if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) { /* If a device receives a * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an * already-assigned Destination CID, then both the * original channel and the new channel shall be * immediately discarded and not used. */ l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); chan = __l2cap_get_chan_by_dcid(conn, dcid); l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); l2cap_chan_unlock(chan); continue; } switch (result) { case L2CAP_CR_LE_AUTHENTICATION: case L2CAP_CR_LE_ENCRYPTION: /* If we already have MITM protection we can't do * anything. */ if (hcon->sec_level > BT_SECURITY_MEDIUM) { l2cap_chan_del(chan, ECONNREFUSED); break; } sec_level = hcon->sec_level + 1; if (chan->sec_level < sec_level) chan->sec_level = sec_level; /* We'll need to send a new Connect Request */ clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags); smp_conn_security(hcon, chan->sec_level); break; case L2CAP_CR_LE_BAD_PSM: l2cap_chan_del(chan, ECONNREFUSED); break; default: /* If dcid was not set it means channels was refused */ if (!dcid) { l2cap_chan_del(chan, ECONNREFUSED); break; } chan->ident = 0; chan->dcid = dcid; chan->omtu = mtu; chan->remote_mps = mps; chan->tx_credits = credits; l2cap_chan_ready(chan); break; } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); return err; } static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_ecred_reconf_req *req = (void *) data; struct l2cap_ecred_reconf_rsp rsp; u16 mtu, mps, result; struct l2cap_chan *chan; int i, num_scid; if (!enable_ecred) return -EINVAL; if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) { result = L2CAP_CR_LE_INVALID_PARAMS; goto respond; } mtu = __le16_to_cpu(req->mtu); mps = __le16_to_cpu(req->mps); BT_DBG("mtu %u mps %u", mtu, mps); if (mtu < L2CAP_ECRED_MIN_MTU) { result = L2CAP_RECONF_INVALID_MTU; goto respond; } if (mps < L2CAP_ECRED_MIN_MPS) { result = L2CAP_RECONF_INVALID_MPS; goto respond; } cmd_len -= sizeof(*req); num_scid = cmd_len / sizeof(u16); result = L2CAP_RECONF_SUCCESS; for (i = 0; i < num_scid; i++) { u16 scid; scid = __le16_to_cpu(req->scid[i]); if (!scid) return -EPROTO; chan = __l2cap_get_chan_by_dcid(conn, scid); if (!chan) continue; /* If the MTU value is decreased for any of the included * channels, then the receiver shall disconnect all * included channels. 
*/ if (chan->omtu > mtu) { BT_ERR("chan %p decreased MTU %u -> %u", chan, chan->omtu, mtu); result = L2CAP_RECONF_INVALID_MTU; } chan->omtu = mtu; chan->remote_mps = mps; } respond: rsp.result = cpu_to_le16(result); l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), &rsp); return 0; } static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_chan *chan, *tmp; struct l2cap_ecred_conn_rsp *rsp = (void *) data; u16 result; if (cmd_len < sizeof(*rsp)) return -EPROTO; result = __le16_to_cpu(rsp->result); BT_DBG("result 0x%4.4x", rsp->result); if (!result) return 0; list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { if (chan->ident != cmd->ident) continue; l2cap_chan_del(chan, ECONNRESET); } return 0; } static inline int l2cap_le_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; struct l2cap_chan *chan; if (cmd_len < sizeof(*rej)) return -EPROTO; mutex_lock(&conn->chan_lock); chan = __l2cap_get_chan_by_ident(conn, cmd->ident); if (!chan) goto done; chan = l2cap_chan_hold_unless_zero(chan); if (!chan) goto done; l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); return 0; } static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) { int err = 0; switch (cmd->code) { case L2CAP_COMMAND_REJ: l2cap_le_command_rej(conn, cmd, cmd_len, data); break; case L2CAP_CONN_PARAM_UPDATE_REQ: err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data); break; case L2CAP_CONN_PARAM_UPDATE_RSP: break; case L2CAP_LE_CONN_RSP: l2cap_le_connect_rsp(conn, cmd, cmd_len, data); break; case L2CAP_LE_CONN_REQ: err = l2cap_le_connect_req(conn, cmd, cmd_len, data); break; case L2CAP_LE_CREDITS: err = l2cap_le_credits(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_CONN_REQ: err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_CONN_RSP: err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_RECONF_REQ: err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data); break; case L2CAP_ECRED_RECONF_RSP: err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_REQ: err = l2cap_disconnect_req(conn, cmd, cmd_len, data); break; case L2CAP_DISCONN_RSP: l2cap_disconnect_rsp(conn, cmd, cmd_len, data); break; default: BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); err = -EINVAL; break; } return err; } static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_cmd_hdr *cmd; u16 len; int err; if (hcon->type != LE_LINK) goto drop; if (skb->len < L2CAP_CMD_HDR_SIZE) goto drop; cmd = (void *) skb->data; skb_pull(skb, L2CAP_CMD_HDR_SIZE); len = le16_to_cpu(cmd->len); BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); if (len != skb->len || !cmd->ident) { BT_DBG("corrupted command"); goto drop; } err = l2cap_le_sig_cmd(conn, cmd, len, skb->data); if (err) { struct l2cap_cmd_rej_unk rej; BT_ERR("Wrong link type (%d)", err); rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } drop: kfree_skb(skb); } static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_cmd_hdr *cmd; int err; 
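	/* Hand a copy of the signaling PDU to any raw (L2CAP_CHAN_RAW)
	 * channels first, then parse it.  Unlike the LE signaling channel
	 * handled above, a BR/EDR C-frame may carry several commands back
	 * to back, hence the loop over command headers below.
	 */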
l2cap_raw_recv(conn, skb); if (hcon->type != ACL_LINK) goto drop; while (skb->len >= L2CAP_CMD_HDR_SIZE) { u16 len; cmd = (void *) skb->data; skb_pull(skb, L2CAP_CMD_HDR_SIZE); len = le16_to_cpu(cmd->len); BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); if (len > skb->len || !cmd->ident) { BT_DBG("corrupted command"); break; } err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); if (err) { struct l2cap_cmd_rej_unk rej; BT_ERR("Wrong link type (%d)", err); rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } skb_pull(skb, len); } drop: kfree_skb(skb); } static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) { u16 our_fcs, rcv_fcs; int hdr_size; if (test_bit(FLAG_EXT_CTRL, &chan->flags)) hdr_size = L2CAP_EXT_HDR_SIZE; else hdr_size = L2CAP_ENH_HDR_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) { skb_trim(skb, skb->len - L2CAP_FCS_SIZE); rcv_fcs = get_unaligned_le16(skb->data + skb->len); our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); if (our_fcs != rcv_fcs) return -EBADMSG; } return 0; } static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { struct l2cap_ctrl control; BT_DBG("chan %p", chan); memset(&control, 0, sizeof(control)); control.sframe = 1; control.final = 1; control.reqseq = chan->buffer_seq; set_bit(CONN_SEND_FBIT, &chan->conn_state); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { control.super = L2CAP_SUPER_RNR; l2cap_send_sframe(chan, &control); } if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames > 0) __set_retrans_timer(chan); /* Send pending iframes */ l2cap_ertm_send(chan); if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && test_bit(CONN_SEND_FBIT, &chan->conn_state)) { /* F-bit wasn't sent in an s-frame or i-frame yet, so * send it now. 
*/ control.super = L2CAP_SUPER_RR; l2cap_send_sframe(chan, &control); } } static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, struct sk_buff **last_frag) { /* skb->len reflects data in skb as well as all fragments * skb->data_len reflects only data in fragments */ if (!skb_has_frag_list(skb)) skb_shinfo(skb)->frag_list = new_frag; new_frag->next = NULL; (*last_frag)->next = new_frag; *last_frag = new_frag; skb->len += new_frag->len; skb->data_len += new_frag->len; skb->truesize += new_frag->truesize; } static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, struct l2cap_ctrl *control) { int err = -EINVAL; switch (control->sar) { case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break; err = chan->ops->recv(chan, skb); break; case L2CAP_SAR_START: if (chan->sdu) break; if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) break; chan->sdu_len = get_unaligned_le16(skb->data); skb_pull(skb, L2CAP_SDULEN_SIZE); if (chan->sdu_len > chan->imtu) { err = -EMSGSIZE; break; } if (skb->len >= chan->sdu_len) break; chan->sdu = skb; chan->sdu_last_frag = skb; skb = NULL; err = 0; break; case L2CAP_SAR_CONTINUE: if (!chan->sdu) break; append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len >= chan->sdu_len) break; err = 0; break; case L2CAP_SAR_END: if (!chan->sdu) break; append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len != chan->sdu_len) break; err = chan->ops->recv(chan, chan->sdu); if (!err) { /* Reassembly complete */ chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } break; } if (err) { kfree_skb(skb); kfree_skb(chan->sdu); chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } return err; } static int l2cap_resegment(struct l2cap_chan *chan) { /* Placeholder */ return 0; } void l2cap_chan_busy(struct l2cap_chan *chan, int busy) { u8 event; if (chan->mode != L2CAP_MODE_ERTM) return; event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; l2cap_tx(chan, NULL, NULL, event); } static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) { int err = 0; /* Pass sequential frames to l2cap_reassemble_sdu() * until a gap is encountered. 
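	 * These frames were parked on srej_q while SREJ recovery was in
	 * progress; buffer_seq only advances across the contiguous run, so
	 * anything beyond the first missing frame stays queued until its
	 * retransmission arrives.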
*/ BT_DBG("chan %p", chan); while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { struct sk_buff *skb; BT_DBG("Searching for skb with txseq %d (queue len %d)", chan->buffer_seq, skb_queue_len(&chan->srej_q)); skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); if (!skb) break; skb_unlink(skb, &chan->srej_q); chan->buffer_seq = __next_seq(chan, chan->buffer_seq); err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap); if (err) break; } if (skb_queue_empty(&chan->srej_q)) { chan->rx_state = L2CAP_RX_STATE_RECV; l2cap_send_ack(chan); } return err; } static void l2cap_handle_srej(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->reqseq == chan->next_tx_seq) { BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); l2cap_send_disconn_req(chan, ECONNRESET); return; } skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); if (skb == NULL) { BT_DBG("Seq %d not available for retransmission", control->reqseq); return; } if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); return; } clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (control->poll) { l2cap_pass_to_tx(chan, control); set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_retransmit(chan, control); l2cap_ertm_send(chan); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { set_bit(CONN_SREJ_ACT, &chan->conn_state); chan->srej_save_reqseq = control->reqseq; } } else { l2cap_pass_to_tx_fbit(chan, control); if (control->final) { if (chan->srej_save_reqseq != control->reqseq || !test_and_clear_bit(CONN_SREJ_ACT, &chan->conn_state)) l2cap_retransmit(chan, control); } else { l2cap_retransmit(chan, control); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { set_bit(CONN_SREJ_ACT, &chan->conn_state); chan->srej_save_reqseq = control->reqseq; } } } } static void l2cap_handle_rej(struct l2cap_chan *chan, struct l2cap_ctrl *control) { struct sk_buff *skb; BT_DBG("chan %p, control %p", chan, control); if (control->reqseq == chan->next_tx_seq) { BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); l2cap_send_disconn_req(chan, ECONNRESET); return; } skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); if (chan->max_tx && skb && bt_cb(skb)->l2cap.retries >= chan->max_tx) { BT_DBG("Retry limit exceeded (%d)", chan->max_tx); l2cap_send_disconn_req(chan, ECONNRESET); return; } clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control->final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_all(chan, control); } else { l2cap_retransmit_all(chan, control); l2cap_ertm_send(chan); if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) set_bit(CONN_REJ_ACT, &chan->conn_state); } } static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) { BT_DBG("chan %p, txseq %d", chan, txseq); BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, chan->expected_tx_seq); if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { /* See notes below regarding "double poll" and * invalid packets. 
*/ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { BT_DBG("Invalid/Ignore - after SREJ"); return L2CAP_TXSEQ_INVALID_IGNORE; } else { BT_DBG("Invalid - in window after SREJ sent"); return L2CAP_TXSEQ_INVALID; } } if (chan->srej_list.head == txseq) { BT_DBG("Expected SREJ"); return L2CAP_TXSEQ_EXPECTED_SREJ; } if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { BT_DBG("Duplicate SREJ - txseq already stored"); return L2CAP_TXSEQ_DUPLICATE_SREJ; } if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { BT_DBG("Unexpected SREJ - not requested"); return L2CAP_TXSEQ_UNEXPECTED_SREJ; } } if (chan->expected_tx_seq == txseq) { if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { BT_DBG("Invalid - txseq outside tx window"); return L2CAP_TXSEQ_INVALID; } else { BT_DBG("Expected"); return L2CAP_TXSEQ_EXPECTED; } } if (__seq_offset(chan, txseq, chan->last_acked_seq) < __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) { BT_DBG("Duplicate - expected_tx_seq later than txseq"); return L2CAP_TXSEQ_DUPLICATE; } if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { /* A source of invalid packets is a "double poll" condition, * where delays cause us to send multiple poll packets. If * the remote stack receives and processes both polls, * sequence numbers can wrap around in such a way that a * resent frame has a sequence number that looks like new data * with a sequence gap. This would trigger an erroneous SREJ * request. * * Fortunately, this is impossible with a tx window that's * less than half of the maximum sequence number, which allows * invalid frames to be safely ignored. * * With tx window sizes greater than half of the tx window * maximum, the frame is invalid and cannot be ignored. This * causes a disconnect. */ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { BT_DBG("Invalid/Ignore - txseq outside tx window"); return L2CAP_TXSEQ_INVALID_IGNORE; } else { BT_DBG("Invalid - txseq outside tx window"); return L2CAP_TXSEQ_INVALID; } } else { BT_DBG("Unexpected - txseq indicates missing frames"); return L2CAP_TXSEQ_UNEXPECTED; } } static int l2cap_rx_state_recv(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { struct l2cap_ctrl local_control; int err = 0; bool skb_in_use = false; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); switch (event) { case L2CAP_EV_RECV_IFRAME: switch (l2cap_classify_txseq(chan, control->txseq)) { case L2CAP_TXSEQ_EXPECTED: l2cap_pass_to_tx(chan, control); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { BT_DBG("Busy, discarding expected seq %d", control->txseq); break; } chan->expected_tx_seq = __next_seq(chan, control->txseq); chan->buffer_seq = chan->expected_tx_seq; skb_in_use = true; /* l2cap_reassemble_sdu may free skb, hence invalidate * control, so make a copy in advance to use it after * l2cap_reassemble_sdu returns and to avoid the race * condition, for example: * * The current thread calls: * l2cap_reassemble_sdu * chan->ops->recv == l2cap_sock_recv_cb * __sock_queue_rcv_skb * Another thread calls: * bt_sock_recvmsg * skb_recv_datagram * skb_free_datagram * Then the current thread tries to access control, but * it was freed by skb_free_datagram. 
*/ local_control = *control; err = l2cap_reassemble_sdu(chan, skb, control); if (err) break; if (local_control.final) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { local_control.final = 0; l2cap_retransmit_all(chan, &local_control); l2cap_ertm_send(chan); } } if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) l2cap_send_ack(chan); break; case L2CAP_TXSEQ_UNEXPECTED: l2cap_pass_to_tx(chan, control); /* Can't issue SREJ frames in the local busy state. * Drop this frame, it will be seen as missing * when local busy is exited. */ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { BT_DBG("Busy, discarding unexpected seq %d", control->txseq); break; } /* There was a gap in the sequence, so an SREJ * must be sent for each missing frame. The * current frame is stored for later use. */ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); clear_bit(CONN_SREJ_ACT, &chan->conn_state); l2cap_seq_list_clear(&chan->srej_list); l2cap_send_srej(chan, control->txseq); chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; break; case L2CAP_TXSEQ_DUPLICATE: l2cap_pass_to_tx(chan, control); break; case L2CAP_TXSEQ_INVALID_IGNORE: break; case L2CAP_TXSEQ_INVALID: default: l2cap_send_disconn_req(chan, ECONNRESET); break; } break; case L2CAP_EV_RECV_RR: l2cap_pass_to_tx(chan, control); if (control->final) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) && !__chan_is_moving(chan)) { control->final = 0; l2cap_retransmit_all(chan, control); } l2cap_ertm_send(chan); } else if (control->poll) { l2cap_send_i_or_rr_or_rnr(chan); } else { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) __set_retrans_timer(chan); l2cap_ertm_send(chan); } break; case L2CAP_EV_RECV_RNR: set_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control && control->poll) { set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_rr_or_rnr(chan, 0); } __clear_retrans_timer(chan); l2cap_seq_list_clear(&chan->retrans_list); break; case L2CAP_EV_RECV_REJ: l2cap_handle_rej(chan, control); break; case L2CAP_EV_RECV_SREJ: l2cap_handle_srej(chan, control); break; default: break; } if (skb && !skb_in_use) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } return err; } static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err = 0; u16 txseq = control->txseq; bool skb_in_use = false; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); switch (event) { case L2CAP_EV_RECV_IFRAME: switch (l2cap_classify_txseq(chan, txseq)) { case L2CAP_TXSEQ_EXPECTED: /* Keep frame for reassembly later */ l2cap_pass_to_tx(chan, control); skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); chan->expected_tx_seq = __next_seq(chan, txseq); break; case L2CAP_TXSEQ_EXPECTED_SREJ: l2cap_seq_list_pop(&chan->srej_list); l2cap_pass_to_tx(chan, control); skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); err = l2cap_rx_queued_iframes(chan); if (err) break; break; case L2CAP_TXSEQ_UNEXPECTED: /* Got a frame that can't be reassembled yet. * Save it for later, and send SREJs to cover * the missing frames. 
*/ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); l2cap_pass_to_tx(chan, control); l2cap_send_srej(chan, control->txseq); break; case L2CAP_TXSEQ_UNEXPECTED_SREJ: /* This frame was requested with an SREJ, but * some expected retransmitted frames are * missing. Request retransmission of missing * SREJ'd frames. */ skb_queue_tail(&chan->srej_q, skb); skb_in_use = true; BT_DBG("Queued %p (queue len %d)", skb, skb_queue_len(&chan->srej_q)); l2cap_pass_to_tx(chan, control); l2cap_send_srej_list(chan, control->txseq); break; case L2CAP_TXSEQ_DUPLICATE_SREJ: /* We've already queued this frame. Drop this copy. */ l2cap_pass_to_tx(chan, control); break; case L2CAP_TXSEQ_DUPLICATE: /* Expecting a later sequence number, so this frame * was already received. Ignore it completely. */ break; case L2CAP_TXSEQ_INVALID_IGNORE: break; case L2CAP_TXSEQ_INVALID: default: l2cap_send_disconn_req(chan, ECONNRESET); break; } break; case L2CAP_EV_RECV_RR: l2cap_pass_to_tx(chan, control); if (control->final) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) { control->final = 0; l2cap_retransmit_all(chan, control); } l2cap_ertm_send(chan); } else if (control->poll) { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) { __set_retrans_timer(chan); } set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_srej_tail(chan); } else { if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && chan->unacked_frames) __set_retrans_timer(chan); l2cap_send_ack(chan); } break; case L2CAP_EV_RECV_RNR: set_bit(CONN_REMOTE_BUSY, &chan->conn_state); l2cap_pass_to_tx(chan, control); if (control->poll) { l2cap_send_srej_tail(chan); } else { struct l2cap_ctrl rr_control; memset(&rr_control, 0, sizeof(rr_control)); rr_control.sframe = 1; rr_control.super = L2CAP_SUPER_RR; rr_control.reqseq = chan->buffer_seq; l2cap_send_sframe(chan, &rr_control); } break; case L2CAP_EV_RECV_REJ: l2cap_handle_rej(chan, control); break; case L2CAP_EV_RECV_SREJ: l2cap_handle_srej(chan, control); break; } if (skb && !skb_in_use) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } return err; } static int l2cap_finish_move(struct l2cap_chan *chan) { BT_DBG("chan %p", chan); chan->rx_state = L2CAP_RX_STATE_RECV; if (chan->hs_hcon) chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; else chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; return l2cap_resegment(chan); } static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err; BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, event); if (!control->poll) return -EPROTO; l2cap_process_reqseq(chan, control->reqseq); if (!skb_queue_empty(&chan->tx_q)) chan->tx_send_head = skb_peek(&chan->tx_q); else chan->tx_send_head = NULL; /* Rewind next_tx_seq to the point expected * by the receiver. 
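	 * Frames acked by reqseq were already dropped from tx_q when
	 * l2cap_process_reqseq() ran above, so whatever is still queued is
	 * treated as unsent and transmitted again once the move completes.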
*/ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; err = l2cap_finish_move(chan); if (err) return err; set_bit(CONN_SEND_FBIT, &chan->conn_state); l2cap_send_i_or_rr_or_rnr(chan); if (event == L2CAP_EV_RECV_IFRAME) return -EPROTO; return l2cap_rx_state_recv(chan, control, NULL, event); } static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err; if (!control->final) return -EPROTO; clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->rx_state = L2CAP_RX_STATE_RECV; l2cap_process_reqseq(chan, control->reqseq); if (!skb_queue_empty(&chan->tx_q)) chan->tx_send_head = skb_peek(&chan->tx_q); else chan->tx_send_head = NULL; /* Rewind next_tx_seq to the point expected * by the receiver. */ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; if (chan->hs_hcon) chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; else chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; err = l2cap_resegment(chan); if (!err) err = l2cap_rx_state_recv(chan, control, skb, event); return err; } static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) { /* Make sure reqseq is for a packet that has been sent but not acked */ u16 unacked; unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; } static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb, u8 event) { int err = 0; BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, control, skb, event, chan->rx_state); if (__valid_reqseq(chan, control->reqseq)) { switch (chan->rx_state) { case L2CAP_RX_STATE_RECV: err = l2cap_rx_state_recv(chan, control, skb, event); break; case L2CAP_RX_STATE_SREJ_SENT: err = l2cap_rx_state_srej_sent(chan, control, skb, event); break; case L2CAP_RX_STATE_WAIT_P: err = l2cap_rx_state_wait_p(chan, control, skb, event); break; case L2CAP_RX_STATE_WAIT_F: err = l2cap_rx_state_wait_f(chan, control, skb, event); break; default: /* shut it down */ break; } } else { BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", control->reqseq, chan->next_tx_seq, chan->expected_ack_seq); l2cap_send_disconn_req(chan, ECONNRESET); } return err; } static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, struct sk_buff *skb) { /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store * the txseq field in advance to use it after l2cap_reassemble_sdu * returns and to avoid the race condition, for example: * * The current thread calls: * l2cap_reassemble_sdu * chan->ops->recv == l2cap_sock_recv_cb * __sock_queue_rcv_skb * Another thread calls: * bt_sock_recvmsg * skb_recv_datagram * skb_free_datagram * Then the current thread tries to access control, but it was freed by * skb_free_datagram. 
*/ u16 txseq = control->txseq; BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, chan->rx_state); if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { l2cap_pass_to_tx(chan, control); BT_DBG("buffer_seq %u->%u", chan->buffer_seq, __next_seq(chan, chan->buffer_seq)); chan->buffer_seq = __next_seq(chan, chan->buffer_seq); l2cap_reassemble_sdu(chan, skb, control); } else { if (chan->sdu) { kfree_skb(chan->sdu); chan->sdu = NULL; } chan->sdu_last_frag = NULL; chan->sdu_len = 0; if (skb) { BT_DBG("Freeing %p", skb); kfree_skb(skb); } } chan->last_acked_seq = txseq; chan->expected_tx_seq = __next_seq(chan, txseq); return 0; } static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; u16 len; u8 event; __unpack_control(chan, skb); len = skb->len; /* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery * procedures and ask for retransmission. */ if (l2cap_check_fcs(chan, skb)) goto drop; if (!control->sframe && control->sar == L2CAP_SAR_START) len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) len -= L2CAP_FCS_SIZE; if (len > chan->mps) { l2cap_send_disconn_req(chan, ECONNRESET); goto drop; } if (chan->ops->filter) { if (chan->ops->filter(chan, skb)) goto drop; } if (!control->sframe) { int err; BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", control->sar, control->reqseq, control->final, control->txseq); /* Validate F-bit - F=0 always valid, F=1 only * valid in TX WAIT_F */ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) goto drop; if (chan->mode != L2CAP_MODE_STREAMING) { event = L2CAP_EV_RECV_IFRAME; err = l2cap_rx(chan, control, skb, event); } else { err = l2cap_stream_rx(chan, control, skb); } if (err) l2cap_send_disconn_req(chan, ECONNRESET); } else { const u8 rx_func_to_event[4] = { L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ }; /* Only I-frames are expected in streaming mode */ if (chan->mode == L2CAP_MODE_STREAMING) goto drop; BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", control->reqseq, control->final, control->poll, control->super); if (len != 0) { BT_ERR("Trailing bytes: %d in sframe", len); l2cap_send_disconn_req(chan, ECONNRESET); goto drop; } /* Validate F and P bits */ if (control->final && (control->poll || chan->tx_state != L2CAP_TX_STATE_WAIT_F)) goto drop; event = rx_func_to_event[control->super]; if (l2cap_rx(chan, control, skb, event)) l2cap_send_disconn_req(chan, ECONNRESET); } return 0; drop: kfree_skb(skb); return 0; } static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct l2cap_le_credits pkt; u16 return_credits; return_credits = (chan->imtu / chan->mps) + 1; if (chan->rx_credits >= return_credits) return; return_credits -= chan->rx_credits; BT_DBG("chan %p returning %u credits to sender", chan, return_credits); chan->rx_credits += return_credits; pkt.cid = cpu_to_le16(chan->scid); pkt.credits = cpu_to_le16(return_credits); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); } static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len); /* Wait recv to confirm reception before updating the credits */ err = chan->ops->recv(chan, skb); /* Update credits whenever an SDU is received */ l2cap_chan_le_send_credits(chan); return err; } static int 
l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { int err; if (!chan->rx_credits) { BT_ERR("No credits to receive LE L2CAP data"); l2cap_send_disconn_req(chan, ECONNRESET); return -ENOBUFS; } if (chan->imtu < skb->len) { BT_ERR("Too big LE L2CAP PDU"); return -ENOBUFS; } chan->rx_credits--; BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits); /* Update if remote had run out of credits, this should only happens * if the remote is not using the entire MPS. */ if (!chan->rx_credits) l2cap_chan_le_send_credits(chan); err = 0; if (!chan->sdu) { u16 sdu_len; sdu_len = get_unaligned_le16(skb->data); skb_pull(skb, L2CAP_SDULEN_SIZE); BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u", sdu_len, skb->len, chan->imtu); if (sdu_len > chan->imtu) { BT_ERR("Too big LE L2CAP SDU length received"); err = -EMSGSIZE; goto failed; } if (skb->len > sdu_len) { BT_ERR("Too much LE L2CAP data received"); err = -EINVAL; goto failed; } if (skb->len == sdu_len) return l2cap_ecred_recv(chan, skb); chan->sdu = skb; chan->sdu_len = sdu_len; chan->sdu_last_frag = skb; /* Detect if remote is not able to use the selected MPS */ if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; /* Adjust the number of credits */ BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); chan->mps = mps_len; l2cap_chan_le_send_credits(chan); } return 0; } BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u", chan->sdu->len, skb->len, chan->sdu_len); if (chan->sdu->len + skb->len > chan->sdu_len) { BT_ERR("Too much LE L2CAP data received"); err = -EINVAL; goto failed; } append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag); skb = NULL; if (chan->sdu->len == chan->sdu_len) { err = l2cap_ecred_recv(chan, chan->sdu); if (!err) { chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } } failed: if (err) { kfree_skb(skb); kfree_skb(chan->sdu); chan->sdu = NULL; chan->sdu_last_frag = NULL; chan->sdu_len = 0; } /* We can't return an error here since we took care of the skb * freeing internally. An error return would cause the caller to * do a double-free of the skb. */ return 0; } static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct l2cap_chan *chan; chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { if (cid == L2CAP_CID_A2MP) { chan = a2mp_channel_create(conn, skb); if (!chan) { kfree_skb(skb); return; } l2cap_chan_hold(chan); l2cap_chan_lock(chan); } else { BT_DBG("unknown cid 0x%4.4x", cid); /* Drop packet and return */ kfree_skb(skb); return; } } BT_DBG("chan %p, len %d", chan, skb->len); /* If we receive data on a fixed channel before the info req/rsp * procedure is done simply assume that the channel is supported * and mark it as ready. */ if (chan->chan_type == L2CAP_CHAN_FIXED) l2cap_chan_ready(chan); if (chan->state != BT_CONNECTED) goto drop; switch (chan->mode) { case L2CAP_MODE_LE_FLOWCTL: case L2CAP_MODE_EXT_FLOWCTL: if (l2cap_ecred_data_rcv(chan, skb) < 0) goto drop; goto done; case L2CAP_MODE_BASIC: /* If socket recv buffers overflows we drop data here * which is *bad* because L2CAP has to be reliable. * But we don't have any other choice. L2CAP doesn't * provide flow control mechanism. 
*/ if (chan->imtu < skb->len) { BT_ERR("Dropping L2CAP data: receive buffer overflow"); goto drop; } if (!chan->ops->recv(chan, skb)) goto done; break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: l2cap_data_rcv(chan, skb); goto done; default: BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode); break; } drop: kfree_skb(skb); done: l2cap_chan_unlock(chan); l2cap_chan_put(chan); } static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; if (hcon->type != ACL_LINK) goto free_skb; chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst, ACL_LINK); if (!chan) goto free_skb; BT_DBG("chan %p, len %d", chan, skb->len); if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) goto drop; if (chan->imtu < skb->len) goto drop; /* Store remote BD_ADDR and PSM for msg_name */ bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst); bt_cb(skb)->l2cap.psm = psm; if (!chan->ops->recv(chan, skb)) { l2cap_chan_put(chan); return; } drop: l2cap_chan_put(chan); free_skb: kfree_skb(skb); } static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_hdr *lh = (void *) skb->data; struct hci_conn *hcon = conn->hcon; u16 cid, len; __le16 psm; if (hcon->state != BT_CONNECTED) { BT_DBG("queueing pending rx skb"); skb_queue_tail(&conn->pending_rx, skb); return; } skb_pull(skb, L2CAP_HDR_SIZE); cid = __le16_to_cpu(lh->cid); len = __le16_to_cpu(lh->len); if (len != skb->len) { kfree_skb(skb); return; } /* Since we can't actively block incoming LE connections we must * at least ensure that we ignore incoming data from them. */ if (hcon->type == LE_LINK && hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst, bdaddr_dst_type(hcon))) { kfree_skb(skb); return; } BT_DBG("len %d, cid 0x%4.4x", len, cid); switch (cid) { case L2CAP_CID_SIGNALING: l2cap_sig_channel(conn, skb); break; case L2CAP_CID_CONN_LESS: psm = get_unaligned((__le16 *) skb->data); skb_pull(skb, L2CAP_PSMLEN_SIZE); l2cap_conless_channel(conn, psm, skb); break; case L2CAP_CID_LE_SIGNALING: l2cap_le_sig_channel(conn, skb); break; default: l2cap_data_channel(conn, cid, skb); break; } } static void process_pending_rx(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, pending_rx_work); struct sk_buff *skb; BT_DBG(""); while ((skb = skb_dequeue(&conn->pending_rx))) l2cap_recv_frame(conn, skb); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; struct hci_chan *hchan; if (conn) return conn; hchan = hci_chan_create(hcon); if (!hchan) return NULL; conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) { hci_chan_del(hchan); return NULL; } kref_init(&conn->ref); hcon->l2cap_data = conn; conn->hcon = hci_conn_get(hcon); conn->hchan = hchan; BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); switch (hcon->type) { case LE_LINK: if (hcon->hdev->le_mtu) { conn->mtu = hcon->hdev->le_mtu; break; } fallthrough; default: conn->mtu = hcon->hdev->acl_mtu; break; } conn->feat_mask = 0; conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; if (hcon->type == ACL_LINK && hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED)) conn->local_fixed_chan |= L2CAP_FC_A2MP; if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && (bredr_sc_enabled(hcon->hdev) || hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); mutex_init(&conn->chan_lock); INIT_LIST_HEAD(&conn->chan_l); 
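	/* conn->users holds struct l2cap_user entries added through
	 * l2cap_register_user(); each user gets its probe()/remove()
	 * callbacks as this connection appears and goes away.
	 */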
INIT_LIST_HEAD(&conn->users); INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; return conn; } static bool is_valid_psm(u16 psm, u8 dst_type) { if (!psm) return false; if (bdaddr_type_is_le(dst_type)) return (psm <= 0x00ff); /* PSM must be odd and lsb of upper byte must be 0 */ return ((psm & 0x0101) == 0x0001); } struct l2cap_chan_data { struct l2cap_chan *chan; struct pid *pid; int count; }; static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) { struct l2cap_chan_data *d = data; struct pid *pid; if (chan == d->chan) return; if (!test_bit(FLAG_DEFER_SETUP, &chan->flags)) return; pid = chan->ops->get_peer_pid(chan); /* Only count deferred channels with the same PID/PSM */ if (d->pid != pid || chan->psm != d->chan->psm || chan->ident || chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) return; d->count++; } int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst, u8 dst_type) { struct l2cap_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src, dst, dst_type, __le16_to_cpu(psm), chan->mode); hdev = hci_get_route(dst, &chan->src, chan->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid && chan->chan_type != L2CAP_CHAN_RAW) { err = -EINVAL; goto done; } if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) { err = -EINVAL; goto done; } if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) { err = -EINVAL; goto done; } switch (chan->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_LE_FLOWCTL: break; case L2CAP_MODE_EXT_FLOWCTL: if (!enable_ecred) { err = -EOPNOTSUPP; goto done; } break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; fallthrough; default: err = -EOPNOTSUPP; goto done; } switch (chan->state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ err = 0; goto done; case BT_CONNECTED: /* Already connected */ err = -EISCONN; goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&chan->dst, dst); chan->dst_type = dst_type; chan->psm = psm; chan->dcid = cid; if (bdaddr_type_is_le(dst_type)) { /* Convert from L2CAP channel address type to HCI address type */ if (dst_type == BDADDR_LE_PUBLIC) dst_type = ADDR_LE_DEV_PUBLIC; else dst_type = ADDR_LE_DEV_RANDOM; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) hcon = hci_connect_le(hdev, dst, dst_type, false, chan->sec_level, HCI_LE_CONN_TIMEOUT, HCI_ROLE_SLAVE); else hcon = hci_connect_le_scan(hdev, dst, dst_type, chan->sec_level, HCI_LE_CONN_TIMEOUT, CONN_REASON_L2CAP_CHAN); } else { u8 auth_type = l2cap_get_auth_type(chan); hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, CONN_REASON_L2CAP_CHAN); } if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto done; } conn = l2cap_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto done; } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) { struct l2cap_chan_data data; data.chan = chan; data.pid = chan->ops->get_peer_pid(chan); data.count = 1; l2cap_chan_list(conn, l2cap_chan_by_pid, &data); /* Check if there isn't too many channels being connected */ if (data.count > L2CAP_ECRED_CONN_SCID_MAX) { hci_conn_drop(hcon); err 
= -EPROTO; goto done; } } mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { hci_conn_drop(hcon); err = -EBUSY; goto chan_unlock; } /* Update source addr of the socket */ bacpy(&chan->src, &hcon->src); chan->src_type = bdaddr_src_type(hcon); __l2cap_chan_add(conn, chan); /* l2cap_chan_add takes its own ref so we can drop this one */ hci_conn_drop(hcon); l2cap_state_change(chan, BT_CONNECT); __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); /* Release chan->sport so that it can be reused by other * sockets (as it's only used for listening sockets). */ write_lock(&chan_list_lock); chan->sport = 0; write_unlock(&chan_list_lock); if (hcon->state == BT_CONNECTED) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { __clear_chan_timer(chan); if (l2cap_chan_check_security(chan, true)) l2cap_state_change(chan, BT_CONNECTED); } else l2cap_do_start(chan); } err = 0; chan_unlock: l2cap_chan_unlock(chan); mutex_unlock(&conn->chan_lock); done: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } EXPORT_SYMBOL_GPL(l2cap_chan_connect); static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct { struct l2cap_ecred_reconf_req req; __le16 scid; } pdu; pdu.req.mtu = cpu_to_le16(chan->imtu); pdu.req.mps = cpu_to_le16(chan->mps); pdu.scid = cpu_to_le16(chan->scid); chan->ident = l2cap_get_ident(conn); l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ, sizeof(pdu), &pdu); } int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) { if (chan->imtu > mtu) return -EINVAL; BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); chan->imtu = mtu; l2cap_ecred_reconfigure(chan); return 0; } /* ---- L2CAP interface with lower layer (HCI) ---- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) { int exact = 0, lm1 = 0, lm2 = 0; struct l2cap_chan *c; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets and check their link_mode */ read_lock(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { if (c->state != BT_LISTEN) continue; if (!bacmp(&c->src, &hdev->bdaddr)) { lm1 |= HCI_LM_ACCEPT; if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm1 |= HCI_LM_MASTER; exact++; } else if (!bacmp(&c->src, BDADDR_ANY)) { lm2 |= HCI_LM_ACCEPT; if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm2 |= HCI_LM_MASTER; } } read_unlock(&chan_list_lock); return exact ? lm1 : lm2; } /* Find the next fixed channel in BT_LISTEN state, continue iteration * from an existing channel in the list or from the beginning of the * global list (by passing NULL as first parameter). 
*/ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, struct hci_conn *hcon) { u8 src_type = bdaddr_src_type(hcon); read_lock(&chan_list_lock); if (c) c = list_next_entry(c, global_l); else c = list_entry(chan_list.next, typeof(*c), global_l); list_for_each_entry_from(c, &chan_list, global_l) { if (c->chan_type != L2CAP_CHAN_FIXED) continue; if (c->state != BT_LISTEN) continue; if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY)) continue; if (src_type != c->src_type) continue; c = l2cap_chan_hold_unless_zero(c); read_unlock(&chan_list_lock); return c; } read_unlock(&chan_list_lock); return NULL; } static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct hci_dev *hdev = hcon->hdev; struct l2cap_conn *conn; struct l2cap_chan *pchan; u8 dst_type; if (hcon->type != ACL_LINK && hcon->type != LE_LINK) return; BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (status) { l2cap_conn_del(hcon, bt_to_errno(status)); return; } conn = l2cap_conn_add(hcon); if (!conn) return; dst_type = bdaddr_dst_type(hcon); /* If device is blocked, do not create channels for it */ if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type)) return; /* Find fixed channels and notify them of the new connection. We * use multiple individual lookups, continuing each time where * we left off, because the list lock would prevent calling the * potentially sleeping l2cap_chan_lock() function. */ pchan = l2cap_global_fixed_chan(NULL, hcon); while (pchan) { struct l2cap_chan *chan, *next; /* Client fixed channels should override server ones */ if (__l2cap_get_chan_by_dcid(conn, pchan->scid)) goto next; l2cap_chan_lock(pchan); chan = pchan->ops->new_connection(pchan); if (chan) { bacpy(&chan->src, &hcon->src); bacpy(&chan->dst, &hcon->dst); chan->src_type = bdaddr_src_type(hcon); chan->dst_type = dst_type; __l2cap_chan_add(conn, chan); } l2cap_chan_unlock(pchan); next: next = l2cap_global_fixed_chan(pchan, hcon); l2cap_chan_put(pchan); pchan = next; } l2cap_conn_ready(conn); } int l2cap_disconn_ind(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; BT_DBG("hcon %p", hcon); if (!conn) return HCI_ERROR_REMOTE_USER_TERM; return conn->disc_reason; } static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { if (hcon->type != ACL_LINK && hcon->type != LE_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); l2cap_conn_del(hcon, bt_to_errno(reason)); } static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) { if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) return; if (encrypt == 0x00) { if (chan->sec_level == BT_SECURITY_MEDIUM) { __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); } else if (chan->sec_level == BT_SECURITY_HIGH || chan->sec_level == BT_SECURITY_FIPS) l2cap_chan_close(chan, ECONNREFUSED); } else { if (chan->sec_level == BT_SECURITY_MEDIUM) __clear_chan_timer(chan); } } static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; if (!conn) return; BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, state_to_string(chan->state)); if (chan->scid == L2CAP_CID_A2MP) { l2cap_chan_unlock(chan); continue; } if (!status && encrypt) chan->sec_level = hcon->sec_level; if (!__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); continue; } if (!status 
&& (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)) { chan->ops->resume(chan); l2cap_check_encryption(chan, encrypt); l2cap_chan_unlock(chan); continue; } if (chan->state == BT_CONNECT) { if (!status && l2cap_check_enc_key_size(hcon)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); } else if (chan->state == BT_CONNECT2 && !(chan->mode == L2CAP_MODE_EXT_FLOWCTL || chan->mode == L2CAP_MODE_LE_FLOWCTL)) { struct l2cap_conn_rsp rsp; __u16 res, stat; if (!status && l2cap_check_enc_key_size(hcon)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { l2cap_state_change(chan, BT_CONFIG); res = L2CAP_CR_SUCCESS; stat = L2CAP_CS_NO_INFO; } } else { l2cap_state_change(chan, BT_DISCONN); __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); res = L2CAP_CR_SEC_BLOCK; stat = L2CAP_CS_NO_INFO; } rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(res); rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && res == L2CAP_CR_SUCCESS) { char buf[128]; set_bit(CONF_REQ_SENT, &chan->conf_state); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); chan->num_conf_req++; } } l2cap_chan_unlock(chan); } mutex_unlock(&conn->chan_lock); } /* Append fragment into frame respecting the maximum len of rx_skb */ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, u16 len) { if (!conn->rx_skb) { /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); if (!conn->rx_skb) return -ENOMEM; /* Init rx_len */ conn->rx_len = len; } /* Copy as much as the rx_skb can hold */ len = min_t(u16, len, skb->len); skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len); skb_pull(skb, len); conn->rx_len -= len; return len; } static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) { struct sk_buff *rx_skb; int len; /* Append just enough to complete the header */ len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); /* If header could not be read just continue */ if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) return len; rx_skb = conn->rx_skb; len = get_unaligned_le16(rx_skb->data); /* Check if rx_skb has enough space to received all fragments */ if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) { /* Update expected len */ conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); return L2CAP_LEN_SIZE; } /* Reset conn->rx_skb since it will need to be reallocated in order to * fit all fragments. 
*/ conn->rx_skb = NULL; /* Reallocates rx_skb using the exact expected length */ len = l2cap_recv_frag(conn, rx_skb, len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE)); kfree_skb(rx_skb); return len; } static void l2cap_recv_reset(struct l2cap_conn *conn) { kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; } void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct l2cap_conn *conn = hcon->l2cap_data; int len; /* For AMP controller do not create l2cap conn */ if (!conn && hcon->hdev->dev_type != HCI_PRIMARY) goto drop; if (!conn) conn = l2cap_conn_add(hcon); if (!conn) goto drop; BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); switch (flags) { case ACL_START: case ACL_START_NO_FLUSH: case ACL_COMPLETE: if (conn->rx_skb) { BT_ERR("Unexpected start frame (len %d)", skb->len); l2cap_recv_reset(conn); l2cap_conn_unreliable(conn, ECOMM); } /* Start fragment may not contain the L2CAP length so just * copy the initial byte when that happens and use conn->mtu as * expected length. */ if (skb->len < L2CAP_LEN_SIZE) { l2cap_recv_frag(conn, skb, conn->mtu); break; } len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); return; } BT_DBG("Start: total len %d, frag len %u", len, skb->len); if (skb->len > len) { BT_ERR("Frame is too long (len %u, expected len %d)", skb->len, len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Append fragment into frame (with header) */ if (l2cap_recv_frag(conn, skb, len) < 0) goto drop; break; case ACL_CONT: BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len); if (!conn->rx_skb) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Complete the L2CAP length if it has not been read */ if (conn->rx_skb->len < L2CAP_LEN_SIZE) { if (l2cap_recv_len(conn, skb) < 0) { l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Header still could not be read just continue */ if (conn->rx_skb->len < L2CAP_LEN_SIZE) break; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %u, expected %u)", skb->len, conn->rx_len); l2cap_recv_reset(conn); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Append fragment into frame (with header) */ l2cap_recv_frag(conn, skb, skb->len); if (!conn->rx_len) { /* Complete frame received. l2cap_recv_frame * takes ownership of the skb so set the global * rx_skb pointer to NULL first. 
*/ struct sk_buff *rx_skb = conn->rx_skb; conn->rx_skb = NULL; l2cap_recv_frame(conn, rx_skb); } break; } drop: kfree_skb(skb); } static struct hci_cb l2cap_cb = { .name = "L2CAP", .connect_cfm = l2cap_connect_cfm, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, }; static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct l2cap_chan *c; read_lock(&chan_list_lock); list_for_each_entry(c, &chan_list, global_l) { seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", &c->src, c->src_type, &c->dst, c->dst_type, c->state, __le16_to_cpu(c->psm), c->scid, c->dcid, c->imtu, c->omtu, c->sec_level, c->mode); } read_unlock(&chan_list_lock); return 0; } DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); static struct dentry *l2cap_debugfs; int __init l2cap_init(void) { int err; err = l2cap_init_sockets(); if (err < 0) return err; hci_register_cb(&l2cap_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); return 0; } void l2cap_exit(void) { debugfs_remove(l2cap_debugfs); hci_unregister_cb(&l2cap_cb); l2cap_cleanup_sockets(); } module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); module_param(enable_ecred, bool, 0644); MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
linux-master
net/bluetooth/l2cap_core.c
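An illustrative aside, not part of the kernel source above: l2cap_recv_acldata() and l2cap_recv_frag() buffer ACL fragments until the 4-byte basic L2CAP header and the payload length it announces are complete. The small userspace sketch below shows that header layout; parse_l2cap_hdr() and the sample bytes are invented for illustration, while the little-endian length plus 4-byte header arithmetic mirrors get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE in the code above.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct l2cap_basic_hdr {
	uint16_t len;	/* information payload length, header excluded */
	uint16_t cid;	/* destination channel id */
};

/* Parse the 4-byte basic header from the front of a reassembly buffer. */
static int parse_l2cap_hdr(const uint8_t *buf, size_t buflen,
			   struct l2cap_basic_hdr *hdr)
{
	if (buflen < 4)
		return -1;	/* header incomplete, keep buffering fragments */
	hdr->len = (uint16_t)(buf[0] | (buf[1] << 8));	/* little endian */
	hdr->cid = (uint16_t)(buf[2] | (buf[3] << 8));
	return 0;
}

int main(void)
{
	/* Example start fragment: payload length 3, CID 0x0040, 3 data bytes */
	const uint8_t frag[] = { 0x03, 0x00, 0x40, 0x00, 0xaa, 0xbb, 0xcc };
	struct l2cap_basic_hdr hdr;

	if (parse_l2cap_hdr(frag, sizeof(frag), &hdr) == 0)
		printf("payload %u bytes, cid 0x%04x, full frame %u bytes\n",
		       (unsigned)hdr.len, (unsigned)hdr.cid,
		       (unsigned)hdr.len + 4u);
	return 0;
}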
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2013-2014 Intel Corp. */ #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/debugfs.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/pkt_sched.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/6lowpan.h> /* for the compression support */ #define VERSION "0.1" static struct dentry *lowpan_enable_debugfs; static struct dentry *lowpan_control_debugfs; #define IFACE_NAME_TEMPLATE "bt%d" struct skb_cb { struct in6_addr addr; struct in6_addr gw; struct l2cap_chan *chan; }; #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) /* The devices list contains those devices that we are acting * as a proxy. The BT 6LoWPAN device is a virtual device that * connects to the Bluetooth LE device. The real connection to * BT device is done via l2cap layer. There exists one * virtual device / one BT 6LoWPAN network (=hciX device). * The list contains struct lowpan_dev elements. */ static LIST_HEAD(bt_6lowpan_devices); static DEFINE_SPINLOCK(devices_lock); static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; struct rcu_head rcu; struct l2cap_chan *chan; /* peer addresses in various formats */ unsigned char lladdr[ETH_ALEN]; struct in6_addr peer_addr; }; struct lowpan_btle_dev { struct list_head list; struct hci_dev *hdev; struct net_device *netdev; struct list_head peers; atomic_t peer_count; /* number of items in peers list */ struct work_struct delete_netdev; struct delayed_work notify_peers; }; static inline struct lowpan_btle_dev * lowpan_btle_dev(const struct net_device *netdev) { return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv; } static inline void peer_add(struct lowpan_btle_dev *dev, struct lowpan_peer *peer) { list_add_rcu(&peer->list, &dev->peers); atomic_inc(&dev->peer_count); } static inline bool peer_del(struct lowpan_btle_dev *dev, struct lowpan_peer *peer) { list_del_rcu(&peer->list); kfree_rcu(peer, rcu); module_put(THIS_MODULE); if (atomic_dec_and_test(&dev->peer_count)) { BT_DBG("last peer"); return true; } return false; } static inline struct lowpan_peer * __peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan) { struct lowpan_peer *peer; list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan == chan) return peer; } return NULL; } static inline struct lowpan_peer * __peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn) { struct lowpan_peer *peer; list_for_each_entry_rcu(peer, &dev->peers, list) { if (peer->chan->conn == conn) return peer; } return NULL; } static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, struct in6_addr *daddr, struct sk_buff *skb) { struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); int count = atomic_read(&dev->peer_count); const struct in6_addr *nexthop; struct lowpan_peer *peer; struct neighbour *neigh; BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt); if (!rt) { if (ipv6_addr_any(&lowpan_cb(skb)->gw)) { /* There is neither route nor gateway, * probably the destination is a direct peer. 
*/ nexthop = daddr; } else { /* There is a known gateway */ nexthop = &lowpan_cb(skb)->gw; } } else { nexthop = rt6_nexthop(rt, daddr); /* We need to remember the address because it is needed * by bt_xmit() when sending the packet. In bt_xmit(), the * destination routing info is not set. */ memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr)); } BT_DBG("gw %pI6c", nexthop); rcu_read_lock(); list_for_each_entry_rcu(peer, &dev->peers, list) { BT_DBG("dst addr %pMR dst type %u ip %pI6c", &peer->chan->dst, peer->chan->dst_type, &peer->peer_addr); if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) { rcu_read_unlock(); return peer; } } /* use the neighbour cache for matching addresses assigned by SLAAC */ neigh = __ipv6_neigh_lookup(dev->netdev, nexthop); if (neigh) { list_for_each_entry_rcu(peer, &dev->peers, list) { if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) { neigh_release(neigh); rcu_read_unlock(); return peer; } } neigh_release(neigh); } rcu_read_unlock(); return NULL; } static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) { struct lowpan_btle_dev *entry; struct lowpan_peer *peer = NULL; rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { peer = __peer_lookup_conn(entry, conn); if (peer) break; } rcu_read_unlock(); return peer; } static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn) { struct lowpan_btle_dev *entry; struct lowpan_btle_dev *dev = NULL; rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { if (conn->hcon->hdev == entry->hdev) { dev = entry; break; } } rcu_read_unlock(); return dev; } static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *skb_cp; skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) return NET_RX_DROP; return netif_rx(skb_cp); } static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, struct lowpan_peer *peer) { const u8 *saddr; saddr = peer->lladdr; return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr); } static int recv_pkt(struct sk_buff *skb, struct net_device *dev, struct lowpan_peer *peer) { struct sk_buff *local_skb; int ret; if (!netif_running(dev)) goto drop; if (dev->type != ARPHRD_6LOWPAN || !skb->len) goto drop; skb_reset_network_header(skb); skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto drop; /* check that it's our buffer */ if (lowpan_is_ipv6(*skb_network_header(skb))) { /* Pull off the 1-byte of 6lowpan header. */ skb_pull(skb, 1); /* Copy the packet so that the IPv6 header is * properly aligned. 
*/ local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1, skb_tailroom(skb), GFP_ATOMIC); if (!local_skb) goto drop; local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; local_skb->dev = dev; skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { kfree_skb(local_skb); goto drop; } dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; consume_skb(local_skb); consume_skb(skb); } else if (lowpan_is_iphc(*skb_network_header(skb))) { local_skb = skb_clone(skb, GFP_ATOMIC); if (!local_skb) goto drop; local_skb->dev = dev; ret = iphc_decompress(local_skb, dev, peer); if (ret < 0) { BT_DBG("iphc_decompress failed: %d", ret); kfree_skb(local_skb); goto drop; } local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { kfree_skb(local_skb); goto drop; } dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; consume_skb(local_skb); consume_skb(skb); } else { BT_DBG("unknown packet type"); goto drop; } return NET_RX_SUCCESS; drop: dev->stats.rx_dropped++; return NET_RX_DROP; } /* Packet from BT LE device */ static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { struct lowpan_btle_dev *dev; struct lowpan_peer *peer; int err; peer = lookup_peer(chan->conn); if (!peer) return -ENOENT; dev = lookup_dev(chan->conn); if (!dev || !dev->netdev) return -ENOENT; err = recv_pkt(skb, dev->netdev, peer); if (err) { BT_DBG("recv pkt %d", err); err = -EAGAIN; } return err; } static int setup_header(struct sk_buff *skb, struct net_device *netdev, bdaddr_t *peer_addr, u8 *peer_addr_type) { struct in6_addr ipv6_daddr; struct ipv6hdr *hdr; struct lowpan_btle_dev *dev; struct lowpan_peer *peer; u8 *daddr; int err, status = 0; hdr = ipv6_hdr(skb); dev = lowpan_btle_dev(netdev); memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr)); if (ipv6_addr_is_multicast(&ipv6_daddr)) { lowpan_cb(skb)->chan = NULL; daddr = NULL; } else { BT_DBG("dest IP %pI6c", &ipv6_daddr); /* The packet might be sent to 6lowpan interface * because of routing (either via default route * or user set route) so get peer according to * the destination address. */ peer = peer_lookup_dst(dev, &ipv6_daddr, skb); if (!peer) { BT_DBG("no such peer"); return -ENOENT; } daddr = peer->lladdr; *peer_addr = peer->chan->dst; *peer_addr_type = peer->chan->dst_type; lowpan_cb(skb)->chan = peer->chan; status = 1; } lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr); err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0); if (err < 0) return err; return status; } static int header_create(struct sk_buff *skb, struct net_device *netdev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) { if (type != ETH_P_IPV6) return -EINVAL; return 0; } /* Packet to BT LE device */ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, struct net_device *netdev) { struct msghdr msg; struct kvec iv; int err; /* Remember the skb so that we can send EAGAIN to the caller if * we run out of credits. 
*/ chan->data = skb; iv.iov_base = skb->data; iv.iov_len = skb->len; memset(&msg, 0, sizeof(msg)); iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len); err = l2cap_chan_send(chan, &msg, skb->len); if (err > 0) { netdev->stats.tx_bytes += err; netdev->stats.tx_packets++; return 0; } if (err < 0) netdev->stats.tx_errors++; return err; } static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *local_skb; struct lowpan_btle_dev *entry; int err = 0; rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { struct lowpan_peer *pentry; struct lowpan_btle_dev *dev; if (entry->netdev != netdev) continue; dev = lowpan_btle_dev(entry->netdev); list_for_each_entry_rcu(pentry, &dev->peers, list) { int ret; local_skb = skb_clone(skb, GFP_ATOMIC); BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p", netdev->name, &pentry->chan->dst, pentry->chan->dst_type, &pentry->peer_addr, pentry->chan); ret = send_pkt(pentry->chan, local_skb, netdev); if (ret < 0) err = ret; kfree_skb(local_skb); } } rcu_read_unlock(); return err; } static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) { int err = 0; bdaddr_t addr; u8 addr_type; /* We must take a copy of the skb before we modify/replace the ipv6 * header as the header could be used elsewhere */ skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) return NET_XMIT_DROP; /* Return values from setup_header() * <0 - error, packet is dropped * 0 - this is a multicast packet * 1 - this is unicast packet */ err = setup_header(skb, netdev, &addr, &addr_type); if (err < 0) { kfree_skb(skb); return NET_XMIT_DROP; } if (err) { if (lowpan_cb(skb)->chan) { BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p", netdev->name, &addr, addr_type, &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan); err = send_pkt(lowpan_cb(skb)->chan, skb, netdev); } else { err = -ENOENT; } } else { /* We need to send the packet to every device behind this * interface. */ err = send_mcast_pkt(skb, netdev); } dev_kfree_skb(skb); if (err) BT_DBG("ERROR: xmit failed (%d)", err); return err < 0 ? 
NET_XMIT_DROP : err; } static int bt_dev_init(struct net_device *dev) { netdev_lockdep_set_classes(dev); return 0; } static const struct net_device_ops netdev_ops = { .ndo_init = bt_dev_init, .ndo_start_xmit = bt_xmit, }; static const struct header_ops header_ops = { .create = header_create, }; static void netdev_setup(struct net_device *dev) { dev->hard_header_len = 0; dev->needed_tailroom = 0; dev->flags = IFF_RUNNING | IFF_MULTICAST; dev->watchdog_timeo = 0; dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; dev->netdev_ops = &netdev_ops; dev->header_ops = &header_ops; dev->needs_free_netdev = true; } static struct device_type bt_type = { .name = "bluetooth", }; static void ifup(struct net_device *netdev) { int err; rtnl_lock(); err = dev_open(netdev, NULL); if (err < 0) BT_INFO("iface %s cannot be opened (%d)", netdev->name, err); rtnl_unlock(); } static void ifdown(struct net_device *netdev) { rtnl_lock(); dev_close(netdev); rtnl_unlock(); } static void do_notify_peers(struct work_struct *work) { struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev, notify_peers.work); netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */ } static bool is_bt_6lowpan(struct hci_conn *hcon) { if (hcon->type != LE_LINK) return false; if (!enable_6lowpan) return false; return true; } static struct l2cap_chan *chan_create(void) { struct l2cap_chan *chan; chan = l2cap_chan_create(); if (!chan) return NULL; l2cap_chan_set_defaults(chan); chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->imtu = 1280; return chan; } static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, struct lowpan_btle_dev *dev, bool new_netdev) { struct lowpan_peer *peer; peer = kzalloc(sizeof(*peer), GFP_ATOMIC); if (!peer) return NULL; peer->chan = chan; baswap((void *)peer->lladdr, &chan->dst); lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr); spin_lock(&devices_lock); INIT_LIST_HEAD(&peer->list); peer_add(dev, peer); spin_unlock(&devices_lock); /* Notifying peers about us needs to be done without locks held */ if (new_netdev) INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); return peer->chan; } static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) { struct net_device *netdev; bdaddr_t addr; int err; netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)), IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, netdev_setup); if (!netdev) return -ENOMEM; netdev->addr_assign_type = NET_ADDR_PERM; baswap(&addr, &chan->src); __dev_addr_set(netdev, &addr, sizeof(addr)); netdev->netdev_ops = &netdev_ops; SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); SET_NETDEV_DEVTYPE(netdev, &bt_type); *dev = lowpan_btle_dev(netdev); (*dev)->netdev = netdev; (*dev)->hdev = chan->conn->hcon->hdev; INIT_LIST_HEAD(&(*dev)->peers); spin_lock(&devices_lock); INIT_LIST_HEAD(&(*dev)->list); list_add_rcu(&(*dev)->list, &bt_6lowpan_devices); spin_unlock(&devices_lock); err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE); if (err < 0) { BT_INFO("register_netdev failed %d", err); spin_lock(&devices_lock); list_del_rcu(&(*dev)->list); spin_unlock(&devices_lock); free_netdev(netdev); goto out; } BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d", netdev->ifindex, &chan->dst, chan->dst_type, &chan->src, chan->src_type); set_bit(__LINK_STATE_PRESENT, &netdev->state); return 0; out: return err; } static inline void chan_ready_cb(struct l2cap_chan *chan) { struct 
lowpan_btle_dev *dev; bool new_netdev = false; dev = lookup_dev(chan->conn); BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev); if (!dev) { if (setup_netdev(chan, &dev) < 0) { l2cap_chan_del(chan, -ENOENT); return; } new_netdev = true; } if (!try_module_get(THIS_MODULE)) return; add_peer_chan(chan, dev, new_netdev); ifup(dev->netdev); } static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) { struct l2cap_chan *chan; chan = chan_create(); if (!chan) return NULL; chan->ops = pchan->ops; BT_DBG("chan %p pchan %p", chan, pchan); return chan; } static void delete_netdev(struct work_struct *work) { struct lowpan_btle_dev *entry = container_of(work, struct lowpan_btle_dev, delete_netdev); lowpan_unregister_netdev(entry->netdev); /* The entry pointer is deleted by the netdev destructor. */ } static void chan_close_cb(struct l2cap_chan *chan) { struct lowpan_btle_dev *entry; struct lowpan_btle_dev *dev = NULL; struct lowpan_peer *peer; int err = -ENOENT; bool last = false, remove = true; BT_DBG("chan %p conn %p", chan, chan->conn); if (chan->conn && chan->conn->hcon) { if (!is_bt_6lowpan(chan->conn->hcon)) return; /* If conn is set, then the netdev is also there and we should * not remove it. */ remove = false; } spin_lock(&devices_lock); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { dev = lowpan_btle_dev(entry->netdev); peer = __peer_lookup_chan(dev, chan); if (peer) { last = peer_del(dev, peer); err = 0; BT_DBG("dev %p removing %speer %p", dev, last ? "last " : "1 ", peer); BT_DBG("chan %p orig refcnt %u", chan, kref_read(&chan->kref)); l2cap_chan_put(chan); break; } } if (!err && last && dev && !atomic_read(&dev->peer_count)) { spin_unlock(&devices_lock); cancel_delayed_work_sync(&dev->notify_peers); ifdown(dev->netdev); if (remove) { INIT_WORK(&entry->delete_netdev, delete_netdev); schedule_work(&entry->delete_netdev); } } else { spin_unlock(&devices_lock); } } static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err) { BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn, state_to_string(state), err); } static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { /* Note that we must allocate using GFP_ATOMIC here as * this function is called originally from netdev hard xmit * function in atomic context. 
*/ return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); } static void chan_suspend_cb(struct l2cap_chan *chan) { struct lowpan_btle_dev *dev; BT_DBG("chan %p suspend", chan); dev = lookup_dev(chan->conn); if (!dev || !dev->netdev) return; netif_stop_queue(dev->netdev); } static void chan_resume_cb(struct l2cap_chan *chan) { struct lowpan_btle_dev *dev; BT_DBG("chan %p resume", chan); dev = lookup_dev(chan->conn); if (!dev || !dev->netdev) return; netif_wake_queue(dev->netdev); } static long chan_get_sndtimeo_cb(struct l2cap_chan *chan) { return L2CAP_CONN_TIMEOUT; } static const struct l2cap_ops bt_6lowpan_chan_ops = { .name = "L2CAP 6LoWPAN channel", .new_connection = chan_new_conn_cb, .recv = chan_recv_cb, .close = chan_close_cb, .state_change = chan_state_change_cb, .ready = chan_ready_cb, .resume = chan_resume_cb, .suspend = chan_suspend_cb, .get_sndtimeo = chan_get_sndtimeo_cb, .alloc_skb = chan_alloc_skb_cb, .teardown = l2cap_chan_no_teardown, .defer = l2cap_chan_no_defer, .set_shutdown = l2cap_chan_no_set_shutdown, }; static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type) { struct l2cap_chan *chan; int err; chan = chan_create(); if (!chan) return -EINVAL; chan->ops = &bt_6lowpan_chan_ops; err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0, addr, dst_type); BT_DBG("chan %p err %d", chan, err); if (err < 0) l2cap_chan_put(chan); return err; } static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type) { struct lowpan_peer *peer; BT_DBG("conn %p dst type %u", conn, dst_type); peer = lookup_peer(conn); if (!peer) return -ENOENT; BT_DBG("peer %p chan %p", peer, peer->chan); l2cap_chan_close(peer->chan, ENOENT); return 0; } static struct l2cap_chan *bt_6lowpan_listen(void) { bdaddr_t *addr = BDADDR_ANY; struct l2cap_chan *chan; int err; if (!enable_6lowpan) return NULL; chan = chan_create(); if (!chan) return NULL; chan->ops = &bt_6lowpan_chan_ops; chan->state = BT_LISTEN; chan->src_type = BDADDR_LE_PUBLIC; atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); BT_DBG("chan %p src type %u", chan, chan->src_type); err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); if (err) { l2cap_chan_put(chan); BT_ERR("psm cannot be added err %d", err); return NULL; } return chan; } static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, struct l2cap_conn **conn) { struct hci_conn *hcon; struct hci_dev *hdev; int n; n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", &addr->b[5], &addr->b[4], &addr->b[3], &addr->b[2], &addr->b[1], &addr->b[0], addr_type); if (n < 7) return -EINVAL; /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); if (!hdev) return -ENOENT; hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); hci_dev_unlock(hdev); hci_dev_put(hdev); if (!hcon) return -ENOENT; *conn = (struct l2cap_conn *)hcon->l2cap_data; BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type); return 0; } static void disconnect_all_peers(void) { struct lowpan_btle_dev *entry; struct lowpan_peer *peer, *tmp_peer, *new_peer; struct list_head peers; INIT_LIST_HEAD(&peers); /* We make a separate list of peers as the close_cb() will * modify the device peers list so it is better not to mess * with the same list at the same time. 
*/ rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { list_for_each_entry_rcu(peer, &entry->peers, list) { new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); if (!new_peer) break; new_peer->chan = peer->chan; INIT_LIST_HEAD(&new_peer->list); list_add(&new_peer->list, &peers); } } rcu_read_unlock(); spin_lock(&devices_lock); list_for_each_entry_safe(peer, tmp_peer, &peers, list) { l2cap_chan_close(peer->chan, ENOENT); list_del_rcu(&peer->list); kfree_rcu(peer, rcu); } spin_unlock(&devices_lock); } struct set_enable { struct work_struct work; bool flag; }; static void do_enable_set(struct work_struct *work) { struct set_enable *set_enable = container_of(work, struct set_enable, work); if (!set_enable->flag || enable_6lowpan != set_enable->flag) /* Disconnect existing connections if 6lowpan is * disabled */ disconnect_all_peers(); enable_6lowpan = set_enable->flag; mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); mutex_unlock(&set_lock); kfree(set_enable); } static int lowpan_enable_set(void *data, u64 val) { struct set_enable *set_enable; set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL); if (!set_enable) return -ENOMEM; set_enable->flag = !!val; INIT_WORK(&set_enable->work, do_enable_set); schedule_work(&set_enable->work); return 0; } static int lowpan_enable_get(void *data, u64 *val) { *val = enable_6lowpan; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get, lowpan_enable_set, "%llu\n"); static ssize_t lowpan_control_write(struct file *fp, const char __user *user_buffer, size_t count, loff_t *position) { char buf[32]; size_t buf_size = min(count, sizeof(buf) - 1); int ret; bdaddr_t addr; u8 addr_type; struct l2cap_conn *conn = NULL; if (copy_from_user(buf, user_buffer, buf_size)) return -EFAULT; buf[buf_size] = '\0'; if (memcmp(buf, "connect ", 8) == 0) { ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn); if (ret == -EINVAL) return ret; mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } mutex_unlock(&set_lock); if (conn) { struct lowpan_peer *peer; if (!is_bt_6lowpan(conn->hcon)) return -EINVAL; peer = lookup_peer(conn); if (peer) { BT_DBG("6LoWPAN connection already exists"); return -EALREADY; } BT_DBG("conn %p dst %pMR type %d user %u", conn, &conn->hcon->dst, conn->hcon->dst_type, addr_type); } ret = bt_6lowpan_connect(&addr, addr_type); if (ret < 0) return ret; return count; } if (memcmp(buf, "disconnect ", 11) == 0) { ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn); if (ret < 0) return ret; ret = bt_6lowpan_disconnect(conn, addr_type); if (ret < 0) return ret; return count; } return count; } static int lowpan_control_show(struct seq_file *f, void *ptr) { struct lowpan_btle_dev *entry; struct lowpan_peer *peer; spin_lock(&devices_lock); list_for_each_entry(entry, &bt_6lowpan_devices, list) { list_for_each_entry(peer, &entry->peers, list) seq_printf(f, "%pMR (type %u)\n", &peer->chan->dst, peer->chan->dst_type); } spin_unlock(&devices_lock); return 0; } static int lowpan_control_open(struct inode *inode, struct file *file) { return single_open(file, lowpan_control_show, inode->i_private); } static const struct file_operations lowpan_control_fops = { .open = lowpan_control_open, .read = seq_read, .write = lowpan_control_write, .llseek = seq_lseek, .release = single_release, }; static void disconnect_devices(void) { struct lowpan_btle_dev *entry, *tmp, 
*new_dev; struct list_head devices; INIT_LIST_HEAD(&devices); /* We make a separate list of devices because the unregister_netdev() * will call device_event() which will also want to modify the same * devices list. */ rcu_read_lock(); list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC); if (!new_dev) break; new_dev->netdev = entry->netdev; INIT_LIST_HEAD(&new_dev->list); list_add_rcu(&new_dev->list, &devices); } rcu_read_unlock(); list_for_each_entry_safe(entry, tmp, &devices, list) { ifdown(entry->netdev); BT_DBG("Unregistering netdev %s %p", entry->netdev->name, entry->netdev); lowpan_unregister_netdev(entry->netdev); kfree(entry); } } static int device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct lowpan_btle_dev *entry; if (netdev->type != ARPHRD_6LOWPAN) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: spin_lock(&devices_lock); list_for_each_entry(entry, &bt_6lowpan_devices, list) { if (entry->netdev == netdev) { BT_DBG("Unregistered netdev %s %p", netdev->name, netdev); list_del(&entry->list); break; } } spin_unlock(&devices_lock); break; } return NOTIFY_DONE; } static struct notifier_block bt_6lowpan_dev_notifier = { .notifier_call = device_event, }; static int __init bt_6lowpan_init(void) { lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable", 0644, bt_debugfs, NULL, &lowpan_enable_fops); lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644, bt_debugfs, NULL, &lowpan_control_fops); return register_netdevice_notifier(&bt_6lowpan_dev_notifier); } static void __exit bt_6lowpan_exit(void) { debugfs_remove(lowpan_enable_debugfs); debugfs_remove(lowpan_control_debugfs); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } disconnect_devices(); unregister_netdevice_notifier(&bt_6lowpan_dev_notifier); } module_init(bt_6lowpan_init); module_exit(bt_6lowpan_exit); MODULE_AUTHOR("Jukka Rissanen <[email protected]>"); MODULE_DESCRIPTION("Bluetooth 6LoWPAN"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
net/bluetooth/6lowpan.c
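A similar aside for the 6LoWPAN receive path above: recv_pkt() keys off the first 6LoWPAN dispatch byte, accepting uncompressed IPv6 (0x41) and IPHC-compressed (011xxxxx) frames and dropping everything else. The sketch below, with the hypothetical classify_dispatch() helper, encodes those RFC 4944/6282 dispatch values purely for illustration.

#include <stdint.h>
#include <stdio.h>

enum lowpan_pkt { LOWPAN_PKT_IPV6, LOWPAN_PKT_IPHC, LOWPAN_PKT_UNKNOWN };

/* Classify the first byte of a 6LoWPAN frame the way recv_pkt() does. */
static enum lowpan_pkt classify_dispatch(uint8_t dispatch)
{
	if (dispatch == 0x41)		/* uncompressed IPv6 follows (RFC 4944) */
		return LOWPAN_PKT_IPV6;
	if ((dispatch & 0xe0) == 0x60)	/* IPHC, pattern 011xxxxx (RFC 6282) */
		return LOWPAN_PKT_IPHC;
	return LOWPAN_PKT_UNKNOWN;	/* recv_pkt() drops these */
}

int main(void)
{
	const uint8_t samples[] = { 0x41, 0x78, 0x00 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("dispatch 0x%02x -> class %d\n",
		       samples[i], classify_dispatch(samples[i]));
	return 0;
}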
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/debugfs.h> #include <linux/kstrtox.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "smp.h" #include "hci_request.h" #include "hci_debugfs.h" #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ static ssize_t __name ## _read(struct file *file, \ char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ struct hci_dev *hdev = file->private_data; \ char buf[3]; \ \ buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N'; \ buf[1] = '\n'; \ buf[2] = '\0'; \ return simple_read_from_buffer(user_buf, count, ppos, buf, 2); \ } \ \ static ssize_t __name ## _write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ struct hci_dev *hdev = file->private_data; \ bool enable; \ int err; \ \ if (test_bit(HCI_UP, &hdev->flags)) \ return -EBUSY; \ \ err = kstrtobool_from_user(user_buf, count, &enable); \ if (err) \ return err; \ \ if (enable == test_bit(__quirk, &hdev->quirks)) \ return -EALREADY; \ \ change_bit(__quirk, &hdev->quirks); \ \ return count; \ } \ \ static const struct file_operations __name ## _fops = { \ .open = simple_open, \ .read = __name ## _read, \ .write = __name ## _write, \ .llseek = default_llseek, \ } \ #define DEFINE_INFO_ATTRIBUTE(__name, __field) \ static int __name ## _show(struct seq_file *f, void *ptr) \ { \ struct hci_dev *hdev = f->private; \ \ hci_dev_lock(hdev); \ seq_printf(f, "%s\n", hdev->__field ? 
: ""); \ hci_dev_unlock(hdev); \ \ return 0; \ } \ \ DEFINE_SHOW_ATTRIBUTE(__name) static int features_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; u8 p; hci_dev_lock(hdev); for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]); if (lmp_le_capable(hdev)) seq_printf(f, "LE: %8ph\n", hdev->le_features); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(features); static int device_id_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; hci_dev_lock(hdev); seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source, hdev->devid_vendor, hdev->devid_product, hdev->devid_version); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(device_id); static int device_list_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct hci_conn_params *p; struct bdaddr_list *b; hci_dev_lock(hdev); list_for_each_entry(b, &hdev->accept_list, list) seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); list_for_each_entry(p, &hdev->le_conn_params, list) { seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type, p->auto_connect); } hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(device_list); static int blacklist_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; struct bdaddr_list *b; hci_dev_lock(hdev); list_for_each_entry(b, &hdev->reject_list, list) seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(blacklist); static int blocked_keys_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; struct blocked_key *key; rcu_read_lock(); list_for_each_entry_rcu(key, &hdev->blocked_keys, list) seq_printf(f, "%u %*phN\n", key->type, 16, key->val); rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(blocked_keys); static int uuids_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; struct bt_uuid *uuid; hci_dev_lock(hdev); list_for_each_entry(uuid, &hdev->uuids, list) { u8 i, val[16]; /* The Bluetooth UUID values are stored in big endian, * but with reversed byte order. So convert them into * the right order for the %pUb modifier. 
*/ for (i = 0; i < 16; i++) val[i] = uuid->uuid[15 - i]; seq_printf(f, "%pUb\n", val); } hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(uuids); static int remote_oob_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct oob_data *data; hci_dev_lock(hdev); list_for_each_entry(data, &hdev->remote_oob_data, list) { seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n", &data->bdaddr, data->bdaddr_type, data->present, 16, data->hash192, 16, data->rand192, 16, data->hash256, 16, data->rand256); } hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(remote_oob); static int conn_info_min_age_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val == 0 || val > hdev->conn_info_max_age) return -EINVAL; hci_dev_lock(hdev); hdev->conn_info_min_age = val; hci_dev_unlock(hdev); return 0; } static int conn_info_min_age_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->conn_info_min_age; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, conn_info_min_age_set, "%llu\n"); static int conn_info_max_age_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val == 0 || val < hdev->conn_info_min_age) return -EINVAL; hci_dev_lock(hdev); hdev->conn_info_max_age = val; hci_dev_unlock(hdev); return 0; } static int conn_info_max_age_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->conn_info_max_age; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, conn_info_max_age_set, "%llu\n"); static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static const struct file_operations use_debug_keys_fops = { .open = simple_open, .read = use_debug_keys_read, .llseek = default_llseek, }; static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static const struct file_operations sc_only_mode_fops = { .open = simple_open, .read = sc_only_mode_read, .llseek = default_llseek, }; DEFINE_INFO_ATTRIBUTE(hardware_info, hw_info); DEFINE_INFO_ATTRIBUTE(firmware_info, fw_info); void hci_debugfs_create_common(struct hci_dev *hdev) { debugfs_create_file("features", 0444, hdev->debugfs, hdev, &features_fops); debugfs_create_u16("manufacturer", 0444, hdev->debugfs, &hdev->manufacturer); debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver); debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); debugfs_create_u8("hardware_error", 0444, hdev->debugfs, &hdev->hw_error_code); debugfs_create_file("device_id", 0444, hdev->debugfs, hdev, &device_id_fops); debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, &device_list_fops); debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, &blacklist_fops); debugfs_create_file("blocked_keys", 0444, hdev->debugfs, hdev, &blocked_keys_fops); debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev, &remote_oob_fops); debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, &conn_info_min_age_fops); debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev, &conn_info_max_age_fops); if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev)) debugfs_create_file("use_debug_keys", 0444, hdev->debugfs, hdev, &use_debug_keys_fops); if (lmp_sc_capable(hdev) || lmp_le_capable(hdev)) debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, hdev, &sc_only_mode_fops); if (hdev->hw_info) debugfs_create_file("hardware_info", 0444, hdev->debugfs, hdev, &hardware_info_fops); if (hdev->fw_info) debugfs_create_file("firmware_info", 0444, hdev->debugfs, hdev, &firmware_info_fops); } static int inquiry_cache_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; hci_dev_lock(hdev); list_for_each_entry(e, &cache->all, all) { struct inquiry_data *data = &e->data; seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", &data->bdaddr, data->pscan_rep_mode, data->pscan_period_mode, data->pscan_mode, data->dev_class[2], data->dev_class[1], data->dev_class[0], __le16_to_cpu(data->clock_offset), data->rssi, data->ssp_mode, e->timestamp); } hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(inquiry_cache); static int link_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct link_key *key; rcu_read_lock(); list_for_each_entry_rcu(key, &hdev->link_keys, list) seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, HCI_LINK_KEY_SIZE, key->val, key->pin_len); rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(link_keys); static int dev_class_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; hci_dev_lock(hdev); seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(dev_class); static int voice_setting_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->voice_setting; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(voice_setting_fops, voice_setting_get, NULL, "0x%4.4llx\n"); static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = 
file->private_data; char buf[3]; buf[0] = hdev->ssp_debug_mode ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static const struct file_operations ssp_debug_mode_fops = { .open = simple_open, .read = ssp_debug_mode_read, .llseek = default_llseek, }; static int auto_accept_delay_set(void *data, u64 val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); hdev->auto_accept_delay = val; hci_dev_unlock(hdev); return 0; } static int min_encrypt_key_size_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 1 || val > 16) return -EINVAL; hci_dev_lock(hdev); hdev->min_enc_key_size = val; hci_dev_unlock(hdev); return 0; } static int min_encrypt_key_size_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->min_enc_key_size; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(min_encrypt_key_size_fops, min_encrypt_key_size_get, min_encrypt_key_size_set, "%llu\n"); static int auto_accept_delay_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->auto_accept_delay; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, auto_accept_delay_set, "%llu\n"); static ssize_t force_bredr_smp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_bredr_smp_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; err = smp_force_bredr(hdev, enable); if (err) return err; return count; } static const struct file_operations force_bredr_smp_fops = { .open = simple_open, .read = force_bredr_smp_read, .write = force_bredr_smp_write, .llseek = default_llseek, }; static int idle_timeout_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val != 0 && (val < 500 || val > 3600000)) return -EINVAL; hci_dev_lock(hdev); hdev->idle_timeout = val; hci_dev_unlock(hdev); return 0; } static int idle_timeout_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->idle_timeout; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, idle_timeout_set, "%llu\n"); static int sniff_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val == 0 || val % 2 || val > hdev->sniff_max_interval) return -EINVAL; hci_dev_lock(hdev); hdev->sniff_min_interval = val; hci_dev_unlock(hdev); return 0; } static int sniff_min_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->sniff_min_interval; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, sniff_min_interval_set, "%llu\n"); static int sniff_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val == 0 || val % 2 || val < hdev->sniff_min_interval) return -EINVAL; hci_dev_lock(hdev); hdev->sniff_max_interval = val; hci_dev_unlock(hdev); return 0; } static int sniff_max_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->sniff_max_interval; hci_dev_unlock(hdev); return 0; } 
DEFINE_DEBUGFS_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, sniff_max_interval_set, "%llu\n"); void hci_debugfs_create_bredr(struct hci_dev *hdev) { debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev, &inquiry_cache_fops); debugfs_create_file("link_keys", 0400, hdev->debugfs, hdev, &link_keys_fops); debugfs_create_file("dev_class", 0444, hdev->debugfs, hdev, &dev_class_fops); debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev, &voice_setting_fops); /* If the controller does not support BR/EDR Secure Connections * feature, then the BR/EDR SMP channel shall not be present. * * To test this with Bluetooth 4.0 controllers, create a debugfs * switch that allows forcing BR/EDR SMP support and accepting * cross-transport pairing on non-AES encrypted connections. */ if (!lmp_sc_capable(hdev)) debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs, hdev, &force_bredr_smp_fops); if (lmp_ssp_capable(hdev)) { debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, hdev, &ssp_debug_mode_fops); debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs, hdev, &min_encrypt_key_size_fops); debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, hdev, &auto_accept_delay_fops); } if (lmp_sniff_capable(hdev)) { debugfs_create_file("idle_timeout", 0644, hdev->debugfs, hdev, &idle_timeout_fops); debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs, hdev, &sniff_min_interval_fops); debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs, hdev, &sniff_max_interval_fops); } } static int identity_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; bdaddr_t addr; u8 addr_type; hci_dev_lock(hdev); hci_copy_identity_address(hdev, &addr, &addr_type); seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type, 16, hdev->irk, &hdev->rpa); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(identity); static int rpa_timeout_set(void *data, u64 val) { struct hci_dev *hdev = data; /* Require the RPA timeout to be at least 30 seconds and at most * 24 hours. */ if (val < 30 || val > (60 * 60 * 24)) return -EINVAL; hci_dev_lock(hdev); hdev->rpa_timeout = val; hci_dev_unlock(hdev); return 0; } static int rpa_timeout_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->rpa_timeout; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, rpa_timeout_set, "%llu\n"); static int random_address_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; hci_dev_lock(hdev); seq_printf(f, "%pMR\n", &hdev->random_addr); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(random_address); static int static_address_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; hci_dev_lock(hdev); seq_printf(f, "%pMR\n", &hdev->static_addr); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(static_address); static ssize_t force_static_address_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_static_address_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; bool enable; int err; if (hdev_is_powered(hdev)) return -EBUSY; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR)) return -EALREADY; hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR); return count; } static const struct file_operations force_static_address_fops = { .open = simple_open, .read = force_static_address_read, .write = force_static_address_write, .llseek = default_llseek, }; static int white_list_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct bdaddr_list *b; hci_dev_lock(hdev); list_for_each_entry(b, &hdev->le_accept_list, list) seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(white_list); static int resolv_list_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct bdaddr_list *b; hci_dev_lock(hdev); list_for_each_entry(b, &hdev->le_resolv_list, list) seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); hci_dev_unlock(hdev); return 0; } DEFINE_SHOW_ATTRIBUTE(resolv_list); static int identity_resolving_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct smp_irk *irk; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &irk->bdaddr, irk->addr_type, 16, irk->val, &irk->rpa); } rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys); static int long_term_keys_show(struct seq_file *f, void *ptr) { struct hci_dev *hdev = f->private; struct smp_ltk *ltk; rcu_read_lock(); list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list) seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated, ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), __le64_to_cpu(ltk->rand), 16, ltk->val); rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(long_term_keys); static int conn_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval) return -EINVAL; hci_dev_lock(hdev); hdev->le_conn_min_interval = val; hci_dev_unlock(hdev); return 0; } static int conn_min_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_conn_min_interval; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, conn_min_interval_set, "%llu\n"); static int conn_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval) return -EINVAL; hci_dev_lock(hdev); hdev->le_conn_max_interval = val; hci_dev_unlock(hdev); return 0; } static int conn_max_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_conn_max_interval; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, conn_max_interval_set, "%llu\n"); static int conn_latency_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val > 0x01f3) return -EINVAL; hci_dev_lock(hdev); hdev->le_conn_latency = val; hci_dev_unlock(hdev); return 0; } static int 
conn_latency_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_conn_latency; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(conn_latency_fops, conn_latency_get, conn_latency_set, "%llu\n"); static int supervision_timeout_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x000a || val > 0x0c80) return -EINVAL; hci_dev_lock(hdev); hdev->le_supv_timeout = val; hci_dev_unlock(hdev); return 0; } static int supervision_timeout_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_supv_timeout; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get, supervision_timeout_set, "%llu\n"); static int adv_channel_map_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x01 || val > 0x07) return -EINVAL; hci_dev_lock(hdev); hdev->le_adv_channel_map = val; hci_dev_unlock(hdev); return 0; } static int adv_channel_map_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_adv_channel_map; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, adv_channel_map_set, "%llu\n"); static int adv_min_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) return -EINVAL; hci_dev_lock(hdev); hdev->le_adv_min_interval = val; hci_dev_unlock(hdev); return 0; } static int adv_min_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_adv_min_interval; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, adv_min_interval_set, "%llu\n"); static int adv_max_interval_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) return -EINVAL; hci_dev_lock(hdev); hdev->le_adv_max_interval = val; hci_dev_unlock(hdev); return 0; } static int adv_max_interval_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_adv_max_interval; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, adv_max_interval_set, "%llu\n"); static int min_key_size_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) return -EINVAL; hci_dev_lock(hdev); hdev->le_min_key_size = val; hci_dev_unlock(hdev); return 0; } static int min_key_size_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_min_key_size; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(min_key_size_fops, min_key_size_get, min_key_size_set, "%llu\n"); static int max_key_size_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) return -EINVAL; hci_dev_lock(hdev); hdev->le_max_key_size = val; hci_dev_unlock(hdev); return 0; } static int max_key_size_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->le_max_key_size; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(max_key_size_fops, max_key_size_get, max_key_size_set, "%llu\n"); static int auth_payload_timeout_set(void *data, u64 val) { struct hci_dev *hdev = data; if (val < 0x0001 || val > 0xffff) return -EINVAL; hci_dev_lock(hdev); hdev->auth_payload_timeout = val; hci_dev_unlock(hdev); return 0; } static int 
auth_payload_timeout_get(void *data, u64 *val) { struct hci_dev *hdev = data; hci_dev_lock(hdev); *val = hdev->auth_payload_timeout; hci_dev_unlock(hdev); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(auth_payload_timeout_fops, auth_payload_timeout_get, auth_payload_timeout_set, "%llu\n"); static ssize_t force_no_mitm_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM) ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_no_mitm_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[32]; size_t buf_size = min(count, (sizeof(buf) - 1)); bool enable; if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = '\0'; if (kstrtobool(buf, &enable)) return -EINVAL; if (enable == hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM)) return -EALREADY; hci_dev_change_flag(hdev, HCI_FORCE_NO_MITM); return count; } static const struct file_operations force_no_mitm_fops = { .open = simple_open, .read = force_no_mitm_read, .write = force_no_mitm_write, .llseek = default_llseek, }; DEFINE_QUIRK_ATTRIBUTE(quirk_strict_duplicate_filter, HCI_QUIRK_STRICT_DUPLICATE_FILTER); DEFINE_QUIRK_ATTRIBUTE(quirk_simultaneous_discovery, HCI_QUIRK_SIMULTANEOUS_DISCOVERY); void hci_debugfs_create_le(struct hci_dev *hdev) { debugfs_create_file("identity", 0400, hdev->debugfs, hdev, &identity_fops); debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, hdev, &rpa_timeout_fops); debugfs_create_file("random_address", 0444, hdev->debugfs, hdev, &random_address_fops); debugfs_create_file("static_address", 0444, hdev->debugfs, hdev, &static_address_fops); /* For controllers with a public address, provide a debug * option to force the usage of the configured static * address. By default the public address is used. 
*/ if (bacmp(&hdev->bdaddr, BDADDR_ANY)) debugfs_create_file("force_static_address", 0644, hdev->debugfs, hdev, &force_static_address_fops); debugfs_create_u8("white_list_size", 0444, hdev->debugfs, &hdev->le_accept_list_size); debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, &white_list_fops); debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs, &hdev->le_resolv_list_size); debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev, &resolv_list_fops); debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, hdev, &identity_resolving_keys_fops); debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev, &long_term_keys_fops); debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, hdev, &conn_min_interval_fops); debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, hdev, &conn_max_interval_fops); debugfs_create_file("conn_latency", 0644, hdev->debugfs, hdev, &conn_latency_fops); debugfs_create_file("supervision_timeout", 0644, hdev->debugfs, hdev, &supervision_timeout_fops); debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, hdev, &adv_channel_map_fops); debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, hdev, &adv_min_interval_fops); debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev, &adv_max_interval_fops); debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs, &hdev->discov_interleaved_timeout); debugfs_create_file("min_key_size", 0644, hdev->debugfs, hdev, &min_key_size_fops); debugfs_create_file("max_key_size", 0644, hdev->debugfs, hdev, &max_key_size_fops); debugfs_create_file("auth_payload_timeout", 0644, hdev->debugfs, hdev, &auth_payload_timeout_fops); debugfs_create_file("force_no_mitm", 0644, hdev->debugfs, hdev, &force_no_mitm_fops); debugfs_create_file("quirk_strict_duplicate_filter", 0644, hdev->debugfs, hdev, &quirk_strict_duplicate_filter_fops); debugfs_create_file("quirk_simultaneous_discovery", 0644, hdev->debugfs, hdev, &quirk_simultaneous_discovery_fops); } void hci_debugfs_create_conn(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; char name[6]; if (IS_ERR_OR_NULL(hdev->debugfs) || conn->debugfs) return; snprintf(name, sizeof(name), "%u", conn->handle); conn->debugfs = debugfs_create_dir(name, hdev->debugfs); } static ssize_t dut_mode_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; struct sk_buff *skb; bool enable; int err; if (!test_bit(HCI_UP, &hdev->flags)) return -ENETDOWN; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) return -EALREADY; hci_req_sync_lock(hdev); if (enable) skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, HCI_CMD_TIMEOUT); else skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); hci_req_sync_unlock(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); hci_dev_change_flag(hdev, HCI_DUT_MODE); return count; } static const struct file_operations dut_mode_fops = { .open = simple_open, .read = dut_mode_read, .write = dut_mode_write, .llseek = default_llseek, }; static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; char buf[3]; buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct hci_dev *hdev = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; /* When the diagnostic flags are not persistent and the transport * is not active or in user channel operation, then there is no need * for the vendor callback. Instead just store the desired value and * the setting will be programmed when the controller gets powered on. */ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && (!test_bit(HCI_RUNNING, &hdev->flags) || hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) goto done; hci_req_sync_lock(hdev); err = hdev->set_diag(hdev, enable); hci_req_sync_unlock(hdev); if (err < 0) return err; done: if (enable) hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); else hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); return count; } static const struct file_operations vendor_diag_fops = { .open = simple_open, .read = vendor_diag_read, .write = vendor_diag_write, .llseek = default_llseek, }; void hci_debugfs_create_basic(struct hci_dev *hdev) { debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, &dut_mode_fops); if (hdev->set_diag) debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, &vendor_diag_fops); }
linux-master
net/bluetooth/hci_debugfs.c
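A generic, hedged sketch of the debugfs boolean-attribute pattern used by the dut_mode and vendor_diag handlers above (simple_open, a 'Y'/'N' read via simple_read_from_buffer, kstrtobool_from_user on write). The names my_dev, my_flag and the registration call are hypothetical and not part of hci_debugfs.c.

/* Hedged sketch (not from the file above): a minimal debugfs boolean
 * attribute following the same pattern as dut_mode_read()/dut_mode_write().
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

struct my_dev {
	bool my_flag;
	struct dentry *debugfs;
};

static ssize_t my_flag_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	struct my_dev *mdev = file->private_data;
	char buf[3];

	buf[0] = mdev->my_flag ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';

	/* Copy at most two bytes ("Y\n") into the user buffer */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t my_flag_write(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct my_dev *mdev = file->private_data;
	bool enable;
	int err;

	/* Accept "0"/"1", "y"/"n", "on"/"off" etc., like the handlers above */
	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	mdev->my_flag = enable;
	return count;
}

static const struct file_operations my_flag_fops = {
	.open	= simple_open,
	.read	= my_flag_read,
	.write	= my_flag_write,
	.llseek	= default_llseek,
};

/* Registration, e.g. from a probe path:
 *	debugfs_create_file("my_flag", 0644, mdev->debugfs, mdev, &my_flag_fops);
 */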
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth SCO sockets. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/sco.h> static bool disable_esco; static const struct proto_ops sco_sock_ops; static struct bt_sock_list sco_sk_list = { .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) }; /* ---- SCO connections ---- */ struct sco_conn { struct hci_conn *hcon; spinlock_t lock; struct sock *sk; struct delayed_work timeout_work; unsigned int mtu; }; #define sco_conn_lock(c) spin_lock(&c->lock) #define sco_conn_unlock(c) spin_unlock(&c->lock) static void sco_sock_close(struct sock *sk); static void sco_sock_kill(struct sock *sk); /* ----- SCO socket info ----- */ #define sco_pi(sk) ((struct sco_pinfo *) sk) struct sco_pinfo { struct bt_sock bt; bdaddr_t src; bdaddr_t dst; __u32 flags; __u16 setting; struct bt_codec codec; struct sco_conn *conn; }; /* ---- SCO timers ---- */ #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) static void sco_sock_timeout(struct work_struct *work) { struct sco_conn *conn = container_of(work, struct sco_conn, timeout_work.work); struct sock *sk; sco_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); sco_conn_unlock(conn); if (!sk) return; BT_DBG("sock %p state %d", sk, sk->sk_state); lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); release_sock(sk); sock_put(sk); } static void sco_sock_set_timer(struct sock *sk, long timeout) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout); } static void sco_sock_clear_timer(struct sock *sk) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d", sk, sk->sk_state); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); } /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; if (conn) { if (!conn->hcon) conn->hcon = hcon; return conn; } conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); if (!conn) return NULL; spin_lock_init(&conn->lock); INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); hcon->sco_data = conn; conn->hcon = hcon; if (hdev->sco_mtu > 0) conn->mtu = hdev->sco_mtu; else conn->mtu = 60; BT_DBG("hcon %p conn %p", hcon, conn); 
return conn; } /* Delete channel. * Must be called on the locked socket. */ static void sco_chan_del(struct sock *sk, int err) { struct sco_conn *conn; conn = sco_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); if (conn->hcon) hci_conn_drop(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_ZAPPED); } static void sco_conn_del(struct hci_conn *hcon, int err) { struct sco_conn *conn = hcon->sco_data; struct sock *sk; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ sco_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); sco_conn_unlock(conn); if (sk) { lock_sock(sk); sco_sock_clear_timer(sk); sco_chan_del(sk, err); release_sock(sk); sock_put(sk); } /* Ensure no more work items will run before freeing conn. */ cancel_delayed_work_sync(&conn->timeout_work); hcon->sco_data = NULL; kfree(conn); } static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); sco_pi(sk)->conn = conn; conn->sk = sk; if (parent) bt_accept_enqueue(parent, sk, true); } static int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { int err = 0; sco_conn_lock(conn); if (conn->sk) err = -EBUSY; else __sco_chan_add(conn, sk, parent); sco_conn_unlock(conn); return err; } static int sco_connect(struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) { err = -EOPNOTSUPP; goto unlock; } hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_pi(sk)->setting, &sco_pi(sk)->codec); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } conn = sco_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } lock_sock(sk); err = sco_chan_add(conn, sk, NULL); if (err) { release_sock(sk); goto unlock; } /* Update source addr of the socket */ bacpy(&sco_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else { sk->sk_state = BT_CONNECT; sco_sock_set_timer(sk, sk->sk_sndtimeo); } release_sock(sk); unlock: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static int sco_send_frame(struct sock *sk, struct sk_buff *skb) { struct sco_conn *conn = sco_pi(sk)->conn; int len = skb->len; /* Check outgoing MTU */ if (len > conn->mtu) return -EINVAL; BT_DBG("sk %p len %d", sk, len); hci_send_sco(conn->hcon, skb); return len; } static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) { struct sock *sk; sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %u", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) { struct sock *sk; sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, ba)) return sk; } return NULL; } /* Find socket listening on source bdaddr. 
* Returns closest match. */ static struct sock *sco_get_sock_listen(bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; /* Exact match. */ if (!bacmp(&sco_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } read_unlock(&sco_sk_list.lock); return sk ? sk : sk1; } static void sco_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void sco_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { sco_sock_close(sk); sco_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ static void sco_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&sco_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __sco_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: sco_sock_cleanup_listen(sk); break; case BT_CONNECTED: case BT_CONFIG: if (sco_pi(sk)->conn->hcon) { sk->sk_state = BT_DISCONN; sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); sco_conn_lock(sco_pi(sk)->conn); hci_conn_drop(sco_pi(sk)->conn->hcon); sco_pi(sk)->conn->hcon = NULL; sco_conn_unlock(sco_pi(sk)->conn); } else sco_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. 
*/ static void sco_sock_close(struct sock *sk) { lock_sock(sk); sco_sock_clear_timer(sk); __sco_sock_close(sk); release_sock(sk); } static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } } static struct proto sco_proto = { .name = "SCO", .owner = THIS_MODULE, .obj_size = sizeof(struct sco_pinfo) }; static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; sk = bt_sock_alloc(net, sock, &sco_proto, proto, prio, kern); if (!sk) return NULL; sk->sk_destruct = sco_sock_destruct; sk->sk_sndtimeo = SCO_CONN_TIMEOUT; sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; sco_pi(sk)->codec.id = BT_CODEC_CVSD; sco_pi(sk)->codec.cid = 0xffff; sco_pi(sk)->codec.vid = 0xffff; sco_pi(sk)->codec.data_path = 0x00; bt_sock_link(&sco_sk_list, sk); return sk; } static int sco_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &sco_sock_ops; sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sco_sock_init(sk, NULL); return 0; } static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err = 0; if (!addr || addr_len < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr); sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) err = -EINVAL; lock_sock(sk); /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); release_sock(sk); err = sco_connect(sk); if (err) return err; lock_sock(sk); err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); release_sock(sk); return err; } static int sco_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; bdaddr_t *src = &sco_pi(sk)->src; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } write_lock(&sco_sk_list.lock); if (__sco_get_sock_listen_by_addr(src)) { err = -EADDRINUSE; goto unlock; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; unlock: write_unlock(&sco_sk_list.lock); done: release_sock(sk); return err; } static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); done: release_sock(sk); return err; } static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; if (peer) bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); else bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); return sizeof(struct sockaddr_sco); } static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); if (IS_ERR(skb)) return PTR_ERR(skb); lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, skb); else err = -ENOTCONN; release_sock(sk); if (err < 0) kfree_skb(skb); return err; } static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; if (!lmp_esco_capable(hdev)) { struct hci_cp_accept_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = 0x00; /* Ignored */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else { struct hci_cp_accept_sync_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.content_format = cpu_to_le16(setting); switch (setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: if (conn->pkt_type & ESCO_2EV3) cp.max_latency = cpu_to_le16(0x0008); else cp.max_latency = cpu_to_le16(0x000D); cp.retrans_effort = 0x02; break; case SCO_AIRMODE_CVSD: cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; default: /* use CVSD settings as fallback */ cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } } static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(sock, msg, len, flags); } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 opt; struct bt_codecs *codecs; struct hci_dev *hdev; __u8 buffer[255]; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case 
BT_VOICE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, sizeof(voice), optlen); if (copy_from_sockptr(&voice, optval, len)) { err = -EFAULT; break; } /* Explicitly check for these values */ if (voice.setting != BT_VOICE_TRANSPARENT && voice.setting != BT_VOICE_CVSD_16BIT) { err = -EINVAL; break; } sco_pi(sk)->setting = voice.setting; hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) { err = -EBADFD; break; } if (enhanced_sync_conn_capable(hdev) && voice.setting == BT_VOICE_TRANSPARENT) sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; hci_dev_put(hdev); break; case BT_PKT_STATUS: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); else clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); break; case BT_CODEC: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) { err = -EBADFD; break; } if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { hci_dev_put(hdev); err = -EOPNOTSUPP; break; } if (!hdev->get_data_path_id) { hci_dev_put(hdev); err = -EOPNOTSUPP; break; } if (optlen < sizeof(struct bt_codecs) || optlen > sizeof(buffer)) { hci_dev_put(hdev); err = -EINVAL; break; } if (copy_from_sockptr(buffer, optval, optlen)) { hci_dev_put(hdev); err = -EFAULT; break; } codecs = (void *)buffer; if (codecs->num_codecs > 1) { hci_dev_put(hdev); err = -EINVAL; break; } sco_pi(sk)->codec = codecs->codecs[0]; hci_dev_put(hdev); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %u", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 phys; int buf_len; struct codec_list *c; u8 num_codecs, i, __user *ptr; struct hci_dev *hdev; struct hci_codec_caps *caps; struct bt_codec codec; BT_DBG("sk %p", sk); if (level == SOL_SCO) return sco_sock_getsockopt_old(sock, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = 
-EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *)optval)) err = -EFAULT; break; case BT_VOICE: voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, len, sizeof(voice)); if (copy_to_user(optval, (char *)&voice, len)) err = -EFAULT; break; case BT_PHY: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon); if (put_user(phys, (u32 __user *) optval)) err = -EFAULT; break; case BT_PKT_STATUS: if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags), (int __user *)optval)) err = -EFAULT; break; case BT_SNDMTU: case BT_RCVMTU: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) err = -EFAULT; break; case BT_CODEC: num_codecs = 0; buf_len = 0; hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) { err = -EBADFD; break; } if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { hci_dev_put(hdev); err = -EOPNOTSUPP; break; } if (!hdev->get_data_path_id) { hci_dev_put(hdev); err = -EOPNOTSUPP; break; } release_sock(sk); /* find total buffer size required to copy codec + caps */ hci_dev_lock(hdev); list_for_each_entry(c, &hdev->local_codecs, list) { if (c->transport != HCI_TRANSPORT_SCO_ESCO) continue; num_codecs++; for (i = 0, caps = c->caps; i < c->num_caps; i++) { buf_len += 1 + caps->len; caps = (void *)&caps->data[caps->len]; } buf_len += sizeof(struct bt_codec); } hci_dev_unlock(hdev); buf_len += sizeof(struct bt_codecs); if (buf_len > len) { hci_dev_put(hdev); return -ENOBUFS; } ptr = optval; if (put_user(num_codecs, ptr)) { hci_dev_put(hdev); return -EFAULT; } ptr += sizeof(num_codecs); /* Iterate all the codecs supported over SCO and populate * codec data */ hci_dev_lock(hdev); list_for_each_entry(c, &hdev->local_codecs, list) { if (c->transport != HCI_TRANSPORT_SCO_ESCO) continue; codec.id = c->id; codec.cid = c->cid; codec.vid = c->vid; err = hdev->get_data_path_id(hdev, &codec.data_path); if (err < 0) break; codec.num_caps = c->num_caps; if (copy_to_user(ptr, &codec, sizeof(codec))) { err = -EFAULT; break; } ptr += sizeof(codec); /* find codec capabilities data length */ len = 0; for (i = 0, caps = c->caps; i < c->num_caps; i++) { len += 1 + caps->len; caps = (void *)&caps->data[caps->len]; } /* copy codec capabilities data */ if (len && copy_to_user(ptr, c->caps, len)) { err = -EFAULT; break; } ptr += len; } hci_dev_unlock(hdev); hci_dev_put(hdev); lock_sock(sk); if (!err && put_user(buf_len, optlen)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sock_hold(sk); lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; sco_sock_clear_timer(sk); __sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); sock_put(sk); return err; } static int sco_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); sco_sock_kill(sk); return err; } 
static void sco_conn_ready(struct sco_conn *conn) { struct sock *parent; struct sock *sk = conn->sk; BT_DBG("conn %p", conn); if (sk) { lock_sock(sk); sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } else { sco_conn_lock(conn); if (!conn->hcon) { sco_conn_unlock(conn); return; } parent = sco_get_sock_listen(&conn->hcon->src); if (!parent) { sco_conn_unlock(conn); return; } lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); sco_conn_unlock(conn); return; } sco_sock_init(sk, parent); bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); sco_conn_unlock(conn); } } /* ----- SCO interface with lower layer (HCI) ----- */ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct sock *sk; int lm = 0; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets */ read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { lm |= HCI_LM_ACCEPT; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= HCI_PROTO_DEFER; break; } } read_unlock(&sco_sk_list.lock); return lm; } static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); if (!status) { struct sco_conn *conn; conn = sco_conn_add(hcon); if (conn) sco_conn_ready(conn); } else sco_conn_del(hcon, bt_to_errno(status)); } static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); } void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; if (!conn) goto drop; BT_DBG("conn %p len %u", conn, skb->len); if (skb->len) { sco_recv_frame(conn, skb); return; } drop: kfree_skb(skb); } static struct hci_cb sco_cb = { .name = "SCO", .connect_cfm = sco_connect_cfm, .disconn_cfm = sco_disconn_cfm, }; static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, &sco_pi(sk)->dst, sk->sk_state); } read_unlock(&sco_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(sco_debugfs); static struct dentry *sco_debugfs; static const struct proto_ops sco_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = sco_sock_release, .bind = sco_sock_bind, .connect = sco_sock_connect, .listen = sco_sock_listen, .accept = sco_sock_accept, .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .gettstamp = sock_gettstamp, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = sco_sock_shutdown, .setsockopt = sco_sock_setsockopt, .getsockopt = sco_sock_getsockopt }; static const struct net_proto_family sco_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = sco_sock_create, }; int __init sco_init(void) { int 
err; BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr)); err = proto_register(&sco_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); if (err < 0) { BT_ERR("SCO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create SCO proc file"); bt_sock_unregister(BTPROTO_SCO); goto error; } BT_INFO("SCO socket layer initialized"); hci_register_cb(&sco_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, NULL, &sco_debugfs_fops); return 0; error: proto_unregister(&sco_proto); return err; } void sco_exit(void) { bt_procfs_cleanup(&init_net, "sco"); debugfs_remove(sco_debugfs); hci_unregister_cb(&sco_cb); bt_sock_unregister(BTPROTO_SCO); proto_unregister(&sco_proto); } module_param(disable_esco, bool, 0644); MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
linux-master
net/bluetooth/sco.c
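A hedged userspace sketch of the socket API that sco.c implements: SCO sockets are SOCK_SEQPACKET sockets under AF_BLUETOOTH/BTPROTO_SCO, bound to a local adapter address and then connected to a remote bdaddr. It assumes the BlueZ userspace headers (bluetooth/bluetooth.h, bluetooth/sco.h) and libbluetooth's str2ba(); the remote address is a placeholder.

/* Hedged sketch (userspace, not part of the kernel file above).
 * Build with -lbluetooth; "AA:BB:CC:DD:EE:FF" is a placeholder address.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

int main(void)
{
	struct sockaddr_sco addr;
	int sk;

	/* sco_sock_create() only accepts SOCK_SEQPACKET */
	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	/* Bind to any local adapter, as sco_sock_bind() expects */
	memset(&addr, 0, sizeof(addr));
	addr.sco_family = AF_BLUETOOTH;
	bacpy(&addr.sco_bdaddr, BDADDR_ANY);
	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(sk);
		return 1;
	}

	/* Connect to the remote device; this ends up in sco_connect() */
	memset(&addr, 0, sizeof(addr));
	addr.sco_family = AF_BLUETOOTH;
	str2ba("AA:BB:CC:DD:EE:FF", &addr.sco_bdaddr);
	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(sk);
		return 1;
	}

	/* A connected socket can now exchange SCO frames via read()/write() */
	close(sk);
	return 0;
}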
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2015, Heiner Kallweit <[email protected]> */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "leds.h" DEFINE_LED_TRIGGER(bt_power_led_trigger); struct hci_basic_led_trigger { struct led_trigger led_trigger; struct hci_dev *hdev; }; #define to_hci_basic_led_trigger(arg) container_of(arg, \ struct hci_basic_led_trigger, led_trigger) void hci_leds_update_powered(struct hci_dev *hdev, bool enabled) { if (hdev->power_led) led_trigger_event(hdev->power_led, enabled ? LED_FULL : LED_OFF); if (!enabled) { struct hci_dev *d; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_UP, &d->flags)) enabled = true; } read_unlock(&hci_dev_list_lock); } led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF); } static int power_activate(struct led_classdev *led_cdev) { struct hci_basic_led_trigger *htrig; bool powered; htrig = to_hci_basic_led_trigger(led_cdev->trigger); powered = test_bit(HCI_UP, &htrig->hdev->flags); led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF); return 0; } static struct led_trigger *led_allocate_basic(struct hci_dev *hdev, int (*activate)(struct led_classdev *led_cdev), const char *name) { struct hci_basic_led_trigger *htrig; htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL); if (!htrig) return NULL; htrig->hdev = hdev; htrig->led_trigger.activate = activate; htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s-%s", hdev->name, name); if (!htrig->led_trigger.name) goto err_alloc; if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger)) goto err_register; return &htrig->led_trigger; err_register: devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name); err_alloc: devm_kfree(&hdev->dev, htrig); return NULL; } void hci_leds_init(struct hci_dev *hdev) { /* initialize power_led */ hdev->power_led = led_allocate_basic(hdev, power_activate, "power"); } void bt_leds_init(void) { led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger); } void bt_leds_cleanup(void) { led_trigger_unregister_simple(bt_power_led_trigger); }
linux-master
net/bluetooth/leds.c
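A hedged sketch of how a board driver might consume the per-adapter power trigger that hci_leds_init() registers (led_allocate_basic() builds the trigger name as "<hdev name>-power"). The LED name, the "hci0" adapter name and the platform driver are assumptions, not part of leds.c.

/* Hedged sketch (not from the file above): attach a board LED to the
 * "hciX-power" trigger so it follows the adapter's powered state.
 */
#include <linux/leds.h>
#include <linux/platform_device.h>

static struct led_classdev bt_power_led = {
	.name		 = "myboard:blue:bluetooth",
	/* Matches the trigger name built as "%s-%s" (hdev->name, "power") */
	.default_trigger = "hci0-power",
};

static int myboard_leds_probe(struct platform_device *pdev)
{
	/* The LED is driven by led_trigger_event() in hci_leds_update_powered() */
	return devm_led_classdev_register(&pdev->dev, &bt_power_led);
}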
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2011,2012 Intel Corp. */ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_core.h> #include <crypto/hash.h> #include "hci_request.h" #include "a2mp.h" #include "amp.h" /* Remote AMP Controllers interface */ void amp_ctrl_get(struct amp_ctrl *ctrl) { BT_DBG("ctrl %p orig refcnt %d", ctrl, kref_read(&ctrl->kref)); kref_get(&ctrl->kref); } static void amp_ctrl_destroy(struct kref *kref) { struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref); BT_DBG("ctrl %p", ctrl); kfree(ctrl->assoc); kfree(ctrl); } int amp_ctrl_put(struct amp_ctrl *ctrl) { BT_DBG("ctrl %p orig refcnt %d", ctrl, kref_read(&ctrl->kref)); return kref_put(&ctrl->kref, &amp_ctrl_destroy); } struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id) { struct amp_ctrl *ctrl; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return NULL; kref_init(&ctrl->kref); ctrl->id = id; mutex_lock(&mgr->amp_ctrls_lock); list_add(&ctrl->list, &mgr->amp_ctrls); mutex_unlock(&mgr->amp_ctrls_lock); BT_DBG("mgr %p ctrl %p", mgr, ctrl); return ctrl; } void amp_ctrl_list_flush(struct amp_mgr *mgr) { struct amp_ctrl *ctrl, *n; BT_DBG("mgr %p", mgr); mutex_lock(&mgr->amp_ctrls_lock); list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) { list_del(&ctrl->list); amp_ctrl_put(ctrl); } mutex_unlock(&mgr->amp_ctrls_lock); } struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id) { struct amp_ctrl *ctrl; BT_DBG("mgr %p id %u", mgr, id); mutex_lock(&mgr->amp_ctrls_lock); list_for_each_entry(ctrl, &mgr->amp_ctrls, list) { if (ctrl->id == id) { amp_ctrl_get(ctrl); mutex_unlock(&mgr->amp_ctrls_lock); return ctrl; } } mutex_unlock(&mgr->amp_ctrls_lock); return NULL; } /* Physical Link interface */ static u8 __next_handle(struct amp_mgr *mgr) { if (++mgr->handle == 0) mgr->handle = 1; return mgr->handle; } struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, u8 remote_id, bool out) { bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst; struct hci_conn *hcon; u8 role = out ? 
HCI_ROLE_MASTER : HCI_ROLE_SLAVE; hcon = hci_conn_add(hdev, AMP_LINK, dst, role); if (!hcon) return NULL; BT_DBG("hcon %p dst %pMR", hcon, dst); hcon->state = BT_CONNECT; hcon->attempt++; hcon->handle = __next_handle(mgr); hcon->remote_id = remote_id; hcon->amp_mgr = amp_mgr_get(mgr); return hcon; } /* AMP crypto key generation interface */ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) { struct crypto_shash *tfm; struct shash_desc *shash; int ret; if (!ksize) return -EINVAL; tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); if (IS_ERR(tfm)) { BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm)); return PTR_ERR(tfm); } ret = crypto_shash_setkey(tfm, key, ksize); if (ret) { BT_DBG("crypto_ahash_setkey failed: err %d", ret); goto failed; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; goto failed; } shash->tfm = tfm; ret = crypto_shash_digest(shash, plaintext, psize, output); kfree(shash); failed: crypto_free_shash(tfm); return ret; } int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type) { struct hci_dev *hdev = conn->hdev; struct link_key *key; u8 keybuf[HCI_AMP_LINK_KEY_SIZE]; u8 gamp_key[HCI_AMP_LINK_KEY_SIZE]; int err; if (!hci_conn_check_link_mode(conn)) return -EACCES; BT_DBG("conn %p key_type %d", conn, conn->key_type); /* Legacy key */ if (conn->key_type < 3) { bt_dev_err(hdev, "legacy key type %u", conn->key_type); return -EACCES; } *type = conn->key_type; *len = HCI_AMP_LINK_KEY_SIZE; key = hci_find_link_key(hdev, &conn->dst); if (!key) { BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst); return -EACCES; } /* BR/EDR Link Key concatenated together with itself */ memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE); memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE); /* Derive Generic AMP Link Key (gamp) */ err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key); if (err) { bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err); return err; } if (conn->key_type == HCI_LK_DEBUG_COMBINATION) { BT_DBG("Use Generic AMP Key (gamp)"); memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE); return err; } /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */ return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data); } static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data; struct amp_assoc *assoc = &hdev->loc_assoc; size_t rem_len, frag_len; BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); if (rp->status) goto send_rsp; frag_len = skb->len - sizeof(*rp); rem_len = __le16_to_cpu(rp->rem_len); if (rem_len > frag_len) { BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); memcpy(assoc->data + assoc->offset, rp->frag, frag_len); assoc->offset += frag_len; /* Read other fragments */ amp_read_loc_assoc_frag(hdev, rp->phy_handle); return; } memcpy(assoc->data + assoc->offset, rp->frag, rem_len); assoc->len = assoc->offset + rem_len; assoc->offset = 0; send_rsp: /* Send A2MP Rsp when all fragments are received */ a2mp_send_getampassoc_rsp(hdev, rp->status); a2mp_send_create_phy_link_req(hdev, rp->status); } void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle) { struct hci_cp_read_local_amp_assoc cp; struct amp_assoc *loc_assoc = &hdev->loc_assoc; struct hci_request req; int err; BT_DBG("%s handle %u", hdev->name, phy_handle); cp.phy_handle = phy_handle; cp.max_len = cpu_to_le16(hdev->amp_assoc_size); 
cp.len_so_far = cpu_to_le16(loc_assoc->offset); hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); err = hci_req_run_skb(&req, read_local_amp_assoc_complete); if (err < 0) a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); } void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) { struct hci_cp_read_local_amp_assoc cp; struct hci_request req; int err; memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc)); memset(&cp, 0, sizeof(cp)); cp.max_len = cpu_to_le16(hdev->amp_assoc_size); set_bit(READ_LOC_AMP_ASSOC, &mgr->state); hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); err = hci_req_run_skb(&req, read_local_amp_assoc_complete); if (err < 0) a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); } void amp_read_loc_assoc_final_data(struct hci_dev *hdev, struct hci_conn *hcon) { struct hci_cp_read_local_amp_assoc cp; struct amp_mgr *mgr = hcon->amp_mgr; struct hci_request req; int err; if (!mgr) return; cp.phy_handle = hcon->handle; cp.len_so_far = cpu_to_le16(0); cp.max_len = cpu_to_le16(hdev->amp_assoc_size); set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state); /* Read Local AMP Assoc final link information data */ hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); err = hci_req_run_skb(&req, read_local_amp_assoc_complete); if (err < 0) a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); } static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data; BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", hdev->name, rp->status, rp->phy_handle); if (rp->status) return; amp_write_rem_assoc_continue(hdev, rp->phy_handle); } /* Write AMP Assoc data fragments, returns true with last fragment written*/ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev, struct hci_conn *hcon) { struct hci_cp_write_remote_amp_assoc *cp; struct amp_mgr *mgr = hcon->amp_mgr; struct amp_ctrl *ctrl; struct hci_request req; u16 frag_len, len; ctrl = amp_ctrl_lookup(mgr, hcon->remote_id); if (!ctrl) return false; if (!ctrl->assoc_rem_len) { BT_DBG("all fragments are written"); ctrl->assoc_rem_len = ctrl->assoc_len; ctrl->assoc_len_so_far = 0; amp_ctrl_put(ctrl); return true; } frag_len = min_t(u16, 248, ctrl->assoc_rem_len); len = frag_len + sizeof(*cp); cp = kzalloc(len, GFP_KERNEL); if (!cp) { amp_ctrl_put(ctrl); return false; } BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u", hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len); cp->phy_handle = hcon->handle; cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far); cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len); memcpy(cp->frag, ctrl->assoc, frag_len); ctrl->assoc_len_so_far += frag_len; ctrl->assoc_rem_len -= frag_len; amp_ctrl_put(ctrl); hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); hci_req_run_skb(&req, write_remote_amp_assoc_complete); kfree(cp); return false; } void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle) { struct hci_conn *hcon; BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); hcon = hci_conn_hash_lookup_handle(hdev, handle); if (!hcon) return; /* Send A2MP create phylink rsp when all fragments are written */ if (amp_write_rem_assoc_frag(hdev, hcon)) a2mp_send_create_phy_link_rsp(hdev, 0); } void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle) { struct hci_conn *hcon; BT_DBG("%s phy handle 0x%2.2x", 
hdev->name, handle); hcon = hci_conn_hash_lookup_handle(hdev, handle); if (!hcon) return; BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon); amp_write_rem_assoc_frag(hdev, hcon); } static void create_phylink_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct hci_cp_create_phy_link *cp; BT_DBG("%s status 0x%2.2x", hdev->name, status); cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); if (!cp) return; hci_dev_lock(hdev); if (status) { struct hci_conn *hcon; hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); if (hcon) hci_conn_del(hcon); } else { amp_write_remote_assoc(hdev, cp->phy_handle); } hci_dev_unlock(hdev); } void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, struct hci_conn *hcon) { struct hci_cp_create_phy_link cp; struct hci_request req; cp.phy_handle = hcon->handle; BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, hcon->handle); if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, &cp.key_type)) { BT_DBG("Cannot create link key"); return; } hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp); hci_req_run(&req, create_phylink_complete); } static void accept_phylink_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct hci_cp_accept_phy_link *cp; BT_DBG("%s status 0x%2.2x", hdev->name, status); if (status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); if (!cp) return; amp_write_remote_assoc(hdev, cp->phy_handle); } void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, struct hci_conn *hcon) { struct hci_cp_accept_phy_link cp; struct hci_request req; cp.phy_handle = hcon->handle; BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, hcon->handle); if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, &cp.key_type)) { BT_DBG("Cannot create link key"); return; } hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp); hci_req_run(&req, accept_phylink_complete); } void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon) { struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev); struct amp_mgr *mgr = hs_hcon->amp_mgr; struct l2cap_chan *bredr_chan; BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr); if (!bredr_hdev || !mgr || !mgr->bredr_chan) return; bredr_chan = mgr->bredr_chan; l2cap_chan_lock(bredr_chan); set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags); bredr_chan->remote_amp_id = hs_hcon->remote_id; bredr_chan->local_amp_id = hs_hcon->hdev->id; bredr_chan->hs_hcon = hs_hcon; bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu; __l2cap_physical_cfm(bredr_chan, 0); l2cap_chan_unlock(bredr_chan); hci_dev_put(bredr_hdev); } void amp_create_logical_link(struct l2cap_chan *chan) { struct hci_conn *hs_hcon = chan->hs_hcon; struct hci_cp_create_accept_logical_link cp; struct hci_dev *hdev; BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, &chan->conn->hcon->dst); if (!hs_hcon) return; hdev = hci_dev_hold(chan->hs_hcon->hdev); if (!hdev) return; cp.phy_handle = hs_hcon->handle; cp.tx_flow_spec.id = chan->local_id; cp.tx_flow_spec.stype = chan->local_stype; cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu); cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime); cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat); cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to); cp.rx_flow_spec.id = chan->remote_id; cp.rx_flow_spec.stype = chan->remote_stype; cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu); cp.rx_flow_spec.sdu_itime = 
cpu_to_le32(chan->remote_sdu_itime); cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); if (hs_hcon->out) hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), &cp); else hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), &cp); hci_dev_put(hdev); } void amp_disconnect_logical_link(struct hci_chan *hchan) { struct hci_conn *hcon = hchan->conn; struct hci_cp_disconn_logical_link cp; if (hcon->state != BT_CONNECTED) { BT_DBG("hchan %p not connected", hchan); return; } cp.log_handle = cpu_to_le16(hchan->handle); hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp); } void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason) { BT_DBG("hchan %p", hchan); hci_chan_del(hchan); }
linux-master
net/bluetooth/amp.c
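A condensed, hedged sketch of the key-derivation chain that phylink_gen_key() performs, written out as one hypothetical helper that reuses this file's hmac_sha256(); amp_derive_key() and its buffers are illustrative only, not in-tree code.

/* Hedged sketch (illustration only): the two-step derivation done by
 * phylink_gen_key() above, laid out linearly.
 */
static int amp_derive_key(const struct link_key *key,
			  u8 dedicated[HCI_AMP_LINK_KEY_SIZE])
{
	u8 keybuf[HCI_AMP_LINK_KEY_SIZE];	/* 2 * HCI_LINK_KEY_SIZE */
	u8 gamp[HCI_AMP_LINK_KEY_SIZE];
	int err;

	/* Step 1: BR/EDR link key concatenated with itself */
	memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
	memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);

	/* Step 2: Generic AMP Link Key = HMAC-SHA256(keybuf, "gamp") */
	err = hmac_sha256(keybuf, sizeof(keybuf), "gamp", 4, gamp);
	if (err)
		return err;

	/* Step 3: Dedicated AMP Link Key = HMAC-SHA256(gamp, "802b"),
	 * where "802b" is the 802.11 PAL key ID.
	 */
	return hmac_sha256(gamp, sizeof(gamp), "802b", 4, dedicated);
}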
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/debugfs.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/kpp.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include "ecdh_helper.h" #include "smp.h" #define SMP_DEV(hdev) \ ((struct smp_dev *)((struct l2cap_chan *)((hdev)->smp_data))->data) /* Low-level debug macros to be used for stuff that we don't want * accidentally in dmesg, i.e. the values of the various crypto keys * and the inputs & outputs of crypto functions. */ #ifdef DEBUG #define SMP_DBG(fmt, ...) printk(KERN_DEBUG "%s: " fmt, __func__, \ ##__VA_ARGS__) #else #define SMP_DBG(fmt, ...) no_printk(KERN_DEBUG "%s: " fmt, __func__, \ ##__VA_ARGS__) #endif #define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd) /* Keys which are not distributed with Secure Connections */ #define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY) #define SMP_TIMEOUT msecs_to_jiffies(30000) #define ID_ADDR_TIMEOUT msecs_to_jiffies(200) #define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? 
\ 0x3f : 0x07) #define KEY_DIST_MASK 0x07 /* Maximum message length that can be passed to aes_cmac */ #define CMAC_MSG_MAX 80 enum { SMP_FLAG_TK_VALID, SMP_FLAG_CFM_PENDING, SMP_FLAG_MITM_AUTH, SMP_FLAG_COMPLETE, SMP_FLAG_INITIATOR, SMP_FLAG_SC, SMP_FLAG_REMOTE_PK, SMP_FLAG_DEBUG_KEY, SMP_FLAG_WAIT_USER, SMP_FLAG_DHKEY_PENDING, SMP_FLAG_REMOTE_OOB, SMP_FLAG_LOCAL_OOB, SMP_FLAG_CT2, }; struct smp_dev { /* Secure Connections OOB data */ bool local_oob; u8 local_pk[64]; u8 local_rand[16]; bool debug_key; struct crypto_shash *tfm_cmac; struct crypto_kpp *tfm_ecdh; }; struct smp_chan { struct l2cap_conn *conn; struct delayed_work security_timer; unsigned long allow_cmd; /* Bitmask of allowed commands */ u8 preq[7]; /* SMP Pairing Request */ u8 prsp[7]; /* SMP Pairing Response */ u8 prnd[16]; /* SMP Pairing Random (local) */ u8 rrnd[16]; /* SMP Pairing Random (remote) */ u8 pcnf[16]; /* SMP Pairing Confirm */ u8 tk[16]; /* SMP Temporary Key */ u8 rr[16]; /* Remote OOB ra/rb value */ u8 lr[16]; /* Local OOB ra/rb value */ u8 enc_key_size; u8 remote_key_dist; bdaddr_t id_addr; u8 id_addr_type; u8 irk[16]; struct smp_csrk *csrk; struct smp_csrk *responder_csrk; struct smp_ltk *ltk; struct smp_ltk *responder_ltk; struct smp_irk *remote_irk; u8 *link_key; unsigned long flags; u8 method; u8 passkey_round; /* Secure Connections variables */ u8 local_pk[64]; u8 remote_pk[64]; u8 dhkey[32]; u8 mackey[16]; struct crypto_shash *tfm_cmac; struct crypto_kpp *tfm_ecdh; }; /* These debug key values are defined in the SMP section of the core * specification. debug_pk is the public debug key and debug_sk the * private debug key. */ static const u8 debug_pk[64] = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, }; static const u8 debug_sk[32] = { 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, }; static inline void swap_buf(const u8 *src, u8 *dst, size_t len) { size_t i; for (i = 0; i < len; i++) dst[len - 1 - i] = src[i]; } /* The following functions map to the LE SC SMP crypto functions * AES-CMAC, f4, f5, f6, g2 and h6. 
*/ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m, size_t len, u8 mac[16]) { uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX]; int err; if (len > CMAC_MSG_MAX) return -EFBIG; if (!tfm) { BT_ERR("tfm %p", tfm); return -EINVAL; } /* Swap key and message from LSB to MSB */ swap_buf(k, tmp, 16); swap_buf(m, msg_msb, len); SMP_DBG("msg (len %zu) %*phN", len, (int) len, m); SMP_DBG("key %16phN", k); err = crypto_shash_setkey(tfm, tmp, 16); if (err) { BT_ERR("cipher setkey failed: %d", err); return err; } err = crypto_shash_tfm_digest(tfm, msg_msb, len, mac_msb); if (err) { BT_ERR("Hash computation error %d", err); return err; } swap_buf(mac_msb, mac, 16); SMP_DBG("mac %16phN", mac); return 0; } static int smp_f4(struct crypto_shash *tfm_cmac, const u8 u[32], const u8 v[32], const u8 x[16], u8 z, u8 res[16]) { u8 m[65]; int err; SMP_DBG("u %32phN", u); SMP_DBG("v %32phN", v); SMP_DBG("x %16phN z %02x", x, z); m[0] = z; memcpy(m + 1, v, 32); memcpy(m + 33, u, 32); err = aes_cmac(tfm_cmac, x, m, sizeof(m), res); if (err) return err; SMP_DBG("res %16phN", res); return err; } static int smp_f5(struct crypto_shash *tfm_cmac, const u8 w[32], const u8 n1[16], const u8 n2[16], const u8 a1[7], const u8 a2[7], u8 mackey[16], u8 ltk[16]) { /* The btle, salt and length "magic" values are as defined in * the SMP section of the Bluetooth core specification. In ASCII * the btle value ends up being 'btle'. The salt is just a * random number whereas length is the value 256 in little * endian format. */ const u8 btle[4] = { 0x65, 0x6c, 0x74, 0x62 }; const u8 salt[16] = { 0xbe, 0x83, 0x60, 0x5a, 0xdb, 0x0b, 0x37, 0x60, 0x38, 0xa5, 0xf5, 0xaa, 0x91, 0x83, 0x88, 0x6c }; const u8 length[2] = { 0x00, 0x01 }; u8 m[53], t[16]; int err; SMP_DBG("w %32phN", w); SMP_DBG("n1 %16phN n2 %16phN", n1, n2); SMP_DBG("a1 %7phN a2 %7phN", a1, a2); err = aes_cmac(tfm_cmac, salt, w, 32, t); if (err) return err; SMP_DBG("t %16phN", t); memcpy(m, length, 2); memcpy(m + 2, a2, 7); memcpy(m + 9, a1, 7); memcpy(m + 16, n2, 16); memcpy(m + 32, n1, 16); memcpy(m + 48, btle, 4); m[52] = 0; /* Counter */ err = aes_cmac(tfm_cmac, t, m, sizeof(m), mackey); if (err) return err; SMP_DBG("mackey %16phN", mackey); m[52] = 1; /* Counter */ err = aes_cmac(tfm_cmac, t, m, sizeof(m), ltk); if (err) return err; SMP_DBG("ltk %16phN", ltk); return 0; } static int smp_f6(struct crypto_shash *tfm_cmac, const u8 w[16], const u8 n1[16], const u8 n2[16], const u8 r[16], const u8 io_cap[3], const u8 a1[7], const u8 a2[7], u8 res[16]) { u8 m[65]; int err; SMP_DBG("w %16phN", w); SMP_DBG("n1 %16phN n2 %16phN", n1, n2); SMP_DBG("r %16phN io_cap %3phN a1 %7phN a2 %7phN", r, io_cap, a1, a2); memcpy(m, a2, 7); memcpy(m + 7, a1, 7); memcpy(m + 14, io_cap, 3); memcpy(m + 17, r, 16); memcpy(m + 33, n2, 16); memcpy(m + 49, n1, 16); err = aes_cmac(tfm_cmac, w, m, sizeof(m), res); if (err) return err; SMP_DBG("res %16phN", res); return err; } static int smp_g2(struct crypto_shash *tfm_cmac, const u8 u[32], const u8 v[32], const u8 x[16], const u8 y[16], u32 *val) { u8 m[80], tmp[16]; int err; SMP_DBG("u %32phN", u); SMP_DBG("v %32phN", v); SMP_DBG("x %16phN y %16phN", x, y); memcpy(m, y, 16); memcpy(m + 16, v, 32); memcpy(m + 48, u, 32); err = aes_cmac(tfm_cmac, x, m, sizeof(m), tmp); if (err) return err; *val = get_unaligned_le32(tmp); *val %= 1000000; SMP_DBG("val %06u", *val); return 0; } static int smp_h6(struct crypto_shash *tfm_cmac, const u8 w[16], const u8 key_id[4], u8 res[16]) { int err; SMP_DBG("w %16phN key_id %4phN", w, key_id); 
err = aes_cmac(tfm_cmac, w, key_id, 4, res); if (err) return err; SMP_DBG("res %16phN", res); return err; } static int smp_h7(struct crypto_shash *tfm_cmac, const u8 w[16], const u8 salt[16], u8 res[16]) { int err; SMP_DBG("w %16phN salt %16phN", w, salt); err = aes_cmac(tfm_cmac, salt, w, 16, res); if (err) return err; SMP_DBG("res %16phN", res); return err; } /* The following functions map to the legacy SMP crypto functions e, c1, * s1 and ah. */ static int smp_e(const u8 *k, u8 *r) { struct crypto_aes_ctx ctx; uint8_t tmp[16], data[16]; int err; SMP_DBG("k %16phN r %16phN", k, r); /* The most significant octet of key corresponds to k[0] */ swap_buf(k, tmp, 16); err = aes_expandkey(&ctx, tmp, 16); if (err) { BT_ERR("cipher setkey failed: %d", err); return err; } /* Most significant octet of plaintextData corresponds to data[0] */ swap_buf(r, data, 16); aes_encrypt(&ctx, data, data); /* Most significant octet of encryptedData corresponds to data[0] */ swap_buf(data, r, 16); SMP_DBG("r %16phN", r); memzero_explicit(&ctx, sizeof(ctx)); return err; } static int smp_c1(const u8 k[16], const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat, const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16]) { u8 p1[16], p2[16]; int err; SMP_DBG("k %16phN r %16phN", k, r); SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra); SMP_DBG("preq %7phN pres %7phN", preq, pres); memset(p1, 0, 16); /* p1 = pres || preq || _rat || _iat */ p1[0] = _iat; p1[1] = _rat; memcpy(p1 + 2, preq, 7); memcpy(p1 + 9, pres, 7); SMP_DBG("p1 %16phN", p1); /* res = r XOR p1 */ crypto_xor_cpy(res, r, p1, sizeof(p1)); /* res = e(k, res) */ err = smp_e(k, res); if (err) { BT_ERR("Encrypt data error"); return err; } /* p2 = padding || ia || ra */ memcpy(p2, ra, 6); memcpy(p2 + 6, ia, 6); memset(p2 + 12, 0, 4); SMP_DBG("p2 %16phN", p2); /* res = res XOR p2 */ crypto_xor(res, p2, sizeof(p2)); /* res = e(k, res) */ err = smp_e(k, res); if (err) BT_ERR("Encrypt data error"); return err; } static int smp_s1(const u8 k[16], const u8 r1[16], const u8 r2[16], u8 _r[16]) { int err; /* Just least significant octets from r1 and r2 are considered */ memcpy(_r, r2, 8); memcpy(_r + 8, r1, 8); err = smp_e(k, _r); if (err) BT_ERR("Encrypt data error"); return err; } static int smp_ah(const u8 irk[16], const u8 r[3], u8 res[3]) { u8 _res[16]; int err; /* r' = padding || r */ memcpy(_res, r, 3); memset(_res + 3, 0, 13); err = smp_e(irk, _res); if (err) { BT_ERR("Encrypt error"); return err; } /* The output of the random address function ah is: * ah(k, r) = e(k, r') mod 2^24 * The output of the security function e is then truncated to 24 bits * by taking the least significant 24 bits of the output of e as the * result of ah. 
*/ memcpy(res, _res, 3); return 0; } bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], const bdaddr_t *bdaddr) { struct l2cap_chan *chan = hdev->smp_data; u8 hash[3]; int err; if (!chan || !chan->data) return false; bt_dev_dbg(hdev, "RPA %pMR IRK %*phN", bdaddr, 16, irk); err = smp_ah(irk, &bdaddr->b[3], hash); if (err) return false; return !crypto_memneq(bdaddr->b, hash, 3); } int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) { struct l2cap_chan *chan = hdev->smp_data; int err; if (!chan || !chan->data) return -EOPNOTSUPP; get_random_bytes(&rpa->b[3], 3); rpa->b[5] &= 0x3f; /* Clear two most significant bits */ rpa->b[5] |= 0x40; /* Set second most significant bit */ err = smp_ah(irk, &rpa->b[3], rpa->b); if (err < 0) return err; bt_dev_dbg(hdev, "RPA %pMR", rpa); return 0; } int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) { struct l2cap_chan *chan = hdev->smp_data; struct smp_dev *smp; int err; if (!chan || !chan->data) return -EOPNOTSUPP; smp = chan->data; if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { bt_dev_dbg(hdev, "Using debug keys"); err = set_ecdh_privkey(smp->tfm_ecdh, debug_sk); if (err) return err; memcpy(smp->local_pk, debug_pk, 64); smp->debug_key = true; } else { while (true) { /* Generate key pair for Secure Connections */ err = generate_ecdh_keys(smp->tfm_ecdh, smp->local_pk); if (err) return err; /* This is unlikely, but we need to check that * we didn't accidentally generate a debug key. */ if (crypto_memneq(smp->local_pk, debug_pk, 64)) break; } smp->debug_key = false; } SMP_DBG("OOB Public Key X: %32phN", smp->local_pk); SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32); get_random_bytes(smp->local_rand, 16); err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk, smp->local_rand, 0, hash); if (err < 0) return err; memcpy(rand, smp->local_rand, 16); smp->local_oob = true; return 0; } static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp; struct kvec iv[2]; struct msghdr msg; if (!chan) return; bt_dev_dbg(conn->hcon->hdev, "code 0x%2.2x", code); iv[0].iov_base = &code; iv[0].iov_len = 1; iv[1].iov_base = data; iv[1].iov_len = len; memset(&msg, 0, sizeof(msg)); iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len); l2cap_chan_send(chan, &msg, 1 + len); if (!chan->data) return; smp = chan->data; cancel_delayed_work_sync(&smp->security_timer); schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT); } static u8 authreq_to_seclevel(u8 authreq) { if (authreq & SMP_AUTH_MITM) { if (authreq & SMP_AUTH_SC) return BT_SECURITY_FIPS; else return BT_SECURITY_HIGH; } else { return BT_SECURITY_MEDIUM; } } static __u8 seclevel_to_authreq(__u8 sec_level) { switch (sec_level) { case BT_SECURITY_FIPS: case BT_SECURITY_HIGH: return SMP_AUTH_MITM | SMP_AUTH_BONDING; case BT_SECURITY_MEDIUM: return SMP_AUTH_BONDING; default: return SMP_AUTH_NONE; } } static void build_pairing_cmd(struct l2cap_conn *conn, struct smp_cmd_pairing *req, struct smp_cmd_pairing *rsp, __u8 authreq) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT; if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; authreq |= SMP_AUTH_BONDING; } else { authreq &= ~SMP_AUTH_BONDING; } if (hci_dev_test_flag(hdev, 
HCI_RPA_RESOLVING)) remote_dist |= SMP_DIST_ID_KEY; if (hci_dev_test_flag(hdev, HCI_PRIVACY)) local_dist |= SMP_DIST_ID_KEY; if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && (authreq & SMP_AUTH_SC)) { struct oob_data *oob_data; u8 bdaddr_type; if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { local_dist |= SMP_DIST_LINK_KEY; remote_dist |= SMP_DIST_LINK_KEY; } if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) bdaddr_type = BDADDR_LE_PUBLIC; else bdaddr_type = BDADDR_LE_RANDOM; oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, bdaddr_type); if (oob_data && oob_data->present) { set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags); oob_flag = SMP_OOB_PRESENT; memcpy(smp->rr, oob_data->rand256, 16); memcpy(smp->pcnf, oob_data->hash256, 16); SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf); SMP_DBG("OOB Remote Random: %16phN", smp->rr); } } else { authreq &= ~SMP_AUTH_SC; } if (rsp == NULL) { req->io_capability = conn->hcon->io_capability; req->oob_flag = oob_flag; req->max_key_size = hdev->le_max_key_size; req->init_key_dist = local_dist; req->resp_key_dist = remote_dist; req->auth_req = (authreq & AUTH_REQ_MASK(hdev)); smp->remote_key_dist = remote_dist; return; } rsp->io_capability = conn->hcon->io_capability; rsp->oob_flag = oob_flag; rsp->max_key_size = hdev->le_max_key_size; rsp->init_key_dist = req->init_key_dist & remote_dist; rsp->resp_key_dist = req->resp_key_dist & local_dist; rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev)); smp->remote_key_dist = rsp->init_key_dist; } static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) { struct l2cap_chan *chan = conn->smp; struct hci_dev *hdev = conn->hcon->hdev; struct smp_chan *smp = chan->data; if (conn->hcon->pending_sec_level == BT_SECURITY_FIPS && max_key_size != SMP_MAX_ENC_KEY_SIZE) return SMP_ENC_KEY_SIZE; if (max_key_size > hdev->le_max_key_size || max_key_size < SMP_MIN_ENC_KEY_SIZE) return SMP_ENC_KEY_SIZE; smp->enc_key_size = max_key_size; return 0; } static void smp_chan_destroy(struct l2cap_conn *conn) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; bool complete; BUG_ON(!smp); cancel_delayed_work_sync(&smp->security_timer); complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); mgmt_smp_complete(hcon, complete); kfree_sensitive(smp->csrk); kfree_sensitive(smp->responder_csrk); kfree_sensitive(smp->link_key); crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); /* Ensure that we don't leave any debug key around if debug key * support hasn't been explicitly enabled. 
*/ if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG && !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) { list_del_rcu(&smp->ltk->list); kfree_rcu(smp->ltk, rcu); smp->ltk = NULL; } /* If pairing failed clean up any keys we might have */ if (!complete) { if (smp->ltk) { list_del_rcu(&smp->ltk->list); kfree_rcu(smp->ltk, rcu); } if (smp->responder_ltk) { list_del_rcu(&smp->responder_ltk->list); kfree_rcu(smp->responder_ltk, rcu); } if (smp->remote_irk) { list_del_rcu(&smp->remote_irk->list); kfree_rcu(smp->remote_irk, rcu); } } chan->data = NULL; kfree_sensitive(smp); hci_conn_drop(hcon); } static void smp_failure(struct l2cap_conn *conn, u8 reason) { struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan = conn->smp; if (reason) smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE); if (chan->data) smp_chan_destroy(conn); } #define JUST_WORKS 0x00 #define JUST_CFM 0x01 #define REQ_PASSKEY 0x02 #define CFM_PASSKEY 0x03 #define REQ_OOB 0x04 #define DSP_PASSKEY 0x05 #define OVERLAP 0xFF static const u8 gen_method[5][5] = { { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, }; static const u8 sc_method[5][5] = { { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, { JUST_WORKS, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, { DSP_PASSKEY, DSP_PASSKEY, REQ_PASSKEY, JUST_WORKS, DSP_PASSKEY }, { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, { DSP_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, }; static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) { /* If either side has unknown io_caps, use JUST_CFM (which gets * converted later to JUST_WORKS if we're initiators. */ if (local_io > SMP_IO_KEYBOARD_DISPLAY || remote_io > SMP_IO_KEYBOARD_DISPLAY) return JUST_CFM; if (test_bit(SMP_FLAG_SC, &smp->flags)) return sc_method[remote_io][local_io]; return gen_method[remote_io][local_io]; } static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, u8 local_io, u8 remote_io) { struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; u32 passkey = 0; int ret; /* Initialize key for JUST WORKS */ memset(smp->tk, 0, sizeof(smp->tk)); clear_bit(SMP_FLAG_TK_VALID, &smp->flags); bt_dev_dbg(hcon->hdev, "auth:%u lcl:%u rem:%u", auth, local_io, remote_io); /* If neither side wants MITM, either "just" confirm an incoming * request or use just-works for outgoing ones. The JUST_CFM * will be converted to JUST_WORKS if necessary later in this * function. If either side has MITM look up the method from the * table. 
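 *
 * For example (legacy pairing): a local DisplayOnly (0x00) device pairing
 * with a remote KeyboardOnly (0x02) peer that requested MITM resolves to
 * gen_method[2][0] == CFM_PASSKEY, i.e. we generate and display a random
 * passkey and the remote user types it in.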
*/ if (!(auth & SMP_AUTH_MITM)) smp->method = JUST_CFM; else smp->method = get_auth_method(smp, local_io, remote_io); /* Don't confirm locally initiated pairing attempts */ if (smp->method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) smp->method = JUST_WORKS; /* Don't bother user space with no IO capabilities */ if (smp->method == JUST_CFM && hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) smp->method = JUST_WORKS; /* If Just Works, Continue with Zero TK and ask user-space for * confirmation */ if (smp->method == JUST_WORKS) { ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, passkey, 1); if (ret) return ret; set_bit(SMP_FLAG_WAIT_USER, &smp->flags); return 0; } /* If this function is used for SC -> legacy fallback we * can only recover the just-works case. */ if (test_bit(SMP_FLAG_SC, &smp->flags)) return -EINVAL; /* Not Just Works/Confirm results in MITM Authentication */ if (smp->method != JUST_CFM) { set_bit(SMP_FLAG_MITM_AUTH, &smp->flags); if (hcon->pending_sec_level < BT_SECURITY_HIGH) hcon->pending_sec_level = BT_SECURITY_HIGH; } /* If both devices have Keyboard-Display I/O, the initiator * Confirms and the responder Enters the passkey. */ if (smp->method == OVERLAP) { if (hcon->role == HCI_ROLE_MASTER) smp->method = CFM_PASSKEY; else smp->method = REQ_PASSKEY; } /* Generate random passkey. */ if (smp->method == CFM_PASSKEY) { memset(smp->tk, 0, sizeof(smp->tk)); get_random_bytes(&passkey, sizeof(passkey)); passkey %= 1000000; put_unaligned_le32(passkey, smp->tk); bt_dev_dbg(hcon->hdev, "PassKey: %u", passkey); set_bit(SMP_FLAG_TK_VALID, &smp->flags); } if (smp->method == REQ_PASSKEY) ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type); else if (smp->method == JUST_CFM) ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, passkey, 1); else ret = mgmt_user_passkey_notify(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, passkey, 0); return ret; } static u8 smp_confirm(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; struct smp_cmd_pairing_confirm cp; int ret; bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); ret = smp_c1(smp->tk, smp->prnd, smp->preq, smp->prsp, conn->hcon->init_addr_type, &conn->hcon->init_addr, conn->hcon->resp_addr_type, &conn->hcon->resp_addr, cp.confirm_val); if (ret) return SMP_UNSPECIFIED; clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags); smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); if (conn->hcon->out) SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); else SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); return 0; } static u8 smp_random(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; u8 confirm[16]; int ret; bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn, conn->hcon->out ? 
"initiator" : "responder"); ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp, hcon->init_addr_type, &hcon->init_addr, hcon->resp_addr_type, &hcon->resp_addr, confirm); if (ret) return SMP_UNSPECIFIED; if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) { bt_dev_err(hcon->hdev, "pairing failed " "(confirmation values mismatch)"); return SMP_CONFIRM_FAILED; } if (hcon->out) { u8 stk[16]; __le64 rand = 0; __le16 ediv = 0; smp_s1(smp->tk, smp->rrnd, smp->prnd, stk); if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) return SMP_UNSPECIFIED; hci_le_start_enc(hcon, ediv, rand, stk, smp->enc_key_size); hcon->enc_key_size = smp->enc_key_size; set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags); } else { u8 stk[16], auth; __le64 rand = 0; __le16 ediv = 0; smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); smp_s1(smp->tk, smp->prnd, smp->rrnd, stk); if (hcon->pending_sec_level == BT_SECURITY_HIGH) auth = 1; else auth = 0; /* Even though there's no _RESPONDER suffix this is the * responder STK we're adding for later lookup (the initiator * STK never needs to be stored). */ hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, SMP_STK, auth, stk, smp->enc_key_size, ediv, rand); } return 0; } static void smp_notify_keys(struct l2cap_conn *conn) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; struct smp_cmd_pairing *req = (void *) &smp->preq[1]; struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; bool persistent; if (hcon->type == ACL_LINK) { if (hcon->key_type == HCI_LK_DEBUG_COMBINATION) persistent = false; else persistent = !test_bit(HCI_CONN_FLUSH_KEY, &hcon->flags); } else { /* The LTKs, IRKs and CSRKs should be persistent only if * both sides had the bonding bit set in their * authentication requests. */ persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); } if (smp->remote_irk) { mgmt_new_irk(hdev, smp->remote_irk, persistent); /* Now that user space can be considered to know the * identity address track the connection based on it * from now on (assuming this is an LE link). */ if (hcon->type == LE_LINK) { bacpy(&hcon->dst, &smp->remote_irk->bdaddr); hcon->dst_type = smp->remote_irk->addr_type; /* Use a short delay to make sure the new address is * propagated _before_ the channels. 
*/ queue_delayed_work(hdev->workqueue, &conn->id_addr_timer, ID_ADDR_TIMEOUT); } } if (smp->csrk) { smp->csrk->bdaddr_type = hcon->dst_type; bacpy(&smp->csrk->bdaddr, &hcon->dst); mgmt_new_csrk(hdev, smp->csrk, persistent); } if (smp->responder_csrk) { smp->responder_csrk->bdaddr_type = hcon->dst_type; bacpy(&smp->responder_csrk->bdaddr, &hcon->dst); mgmt_new_csrk(hdev, smp->responder_csrk, persistent); } if (smp->ltk) { smp->ltk->bdaddr_type = hcon->dst_type; bacpy(&smp->ltk->bdaddr, &hcon->dst); mgmt_new_ltk(hdev, smp->ltk, persistent); } if (smp->responder_ltk) { smp->responder_ltk->bdaddr_type = hcon->dst_type; bacpy(&smp->responder_ltk->bdaddr, &hcon->dst); mgmt_new_ltk(hdev, smp->responder_ltk, persistent); } if (smp->link_key) { struct link_key *key; u8 type; if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) type = HCI_LK_DEBUG_COMBINATION; else if (hcon->sec_level == BT_SECURITY_FIPS) type = HCI_LK_AUTH_COMBINATION_P256; else type = HCI_LK_UNAUTH_COMBINATION_P256; key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, smp->link_key, type, 0, &persistent); if (key) { mgmt_new_link_key(hdev, key, persistent); /* Don't keep debug keys around if the relevant * flag is not set. */ if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) && key->type == HCI_LK_DEBUG_COMBINATION) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } } } } static void sc_add_ltk(struct smp_chan *smp) { struct hci_conn *hcon = smp->conn->hcon; u8 key_type, auth; if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) key_type = SMP_LTK_P256_DEBUG; else key_type = SMP_LTK_P256; if (hcon->pending_sec_level == BT_SECURITY_FIPS) auth = 1; else auth = 0; smp->ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, key_type, auth, smp->tk, smp->enc_key_size, 0, 0); } static void sc_generate_link_key(struct smp_chan *smp) { /* From core spec. Spells out in ASCII as 'lebr'. */ const u8 lebr[4] = { 0x72, 0x62, 0x65, 0x6c }; smp->link_key = kzalloc(16, GFP_KERNEL); if (!smp->link_key) return; if (test_bit(SMP_FLAG_CT2, &smp->flags)) { /* SALT = 0x000000000000000000000000746D7031 */ const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 }; if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) { kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } } else { /* From core spec. Spells out in ASCII as 'tmp1'. */ const u8 tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 }; if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } } if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { kfree_sensitive(smp->link_key); smp->link_key = NULL; return; } } static void smp_allow_key_dist(struct smp_chan *smp) { /* Allow the first expected phase 3 PDU. The rest of the PDUs * will be allowed in each PDU handler to ensure we receive * them in the correct order. */ if (smp->remote_key_dist & SMP_DIST_ENC_KEY) SMP_ALLOW_CMD(smp, SMP_CMD_ENCRYPT_INFO); else if (smp->remote_key_dist & SMP_DIST_ID_KEY) SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); else if (smp->remote_key_dist & SMP_DIST_SIGN) SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); } static void sc_generate_ltk(struct smp_chan *smp) { /* From core spec. Spells out in ASCII as 'brle'. 
*/ const u8 brle[4] = { 0x65, 0x6c, 0x72, 0x62 }; struct hci_conn *hcon = smp->conn->hcon; struct hci_dev *hdev = hcon->hdev; struct link_key *key; key = hci_find_link_key(hdev, &hcon->dst); if (!key) { bt_dev_err(hdev, "no Link Key found to generate LTK"); return; } if (key->type == HCI_LK_DEBUG_COMBINATION) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); if (test_bit(SMP_FLAG_CT2, &smp->flags)) { /* SALT = 0x000000000000000000000000746D7032 */ const u8 salt[16] = { 0x32, 0x70, 0x6d, 0x74 }; if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk)) return; } else { /* From core spec. Spells out in ASCII as 'tmp2'. */ const u8 tmp2[4] = { 0x32, 0x70, 0x6d, 0x74 }; if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk)) return; } if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk)) return; sc_add_ltk(smp); } static void smp_distribute_keys(struct smp_chan *smp) { struct smp_cmd_pairing *req, *rsp; struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; __u8 *keydist; bt_dev_dbg(hdev, "conn %p", conn); rsp = (void *) &smp->prsp[1]; /* The responder sends its keys first */ if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) { smp_allow_key_dist(smp); return; } req = (void *) &smp->preq[1]; if (hcon->out) { keydist = &rsp->init_key_dist; *keydist &= req->init_key_dist; } else { keydist = &rsp->resp_key_dist; *keydist &= req->resp_key_dist; } if (test_bit(SMP_FLAG_SC, &smp->flags)) { if (hcon->type == LE_LINK && (*keydist & SMP_DIST_LINK_KEY)) sc_generate_link_key(smp); if (hcon->type == ACL_LINK && (*keydist & SMP_DIST_ENC_KEY)) sc_generate_ltk(smp); /* Clear the keys which are generated but not distributed */ *keydist &= ~SMP_SC_NO_DIST; } bt_dev_dbg(hdev, "keydist 0x%x", *keydist); if (*keydist & SMP_DIST_ENC_KEY) { struct smp_cmd_encrypt_info enc; struct smp_cmd_initiator_ident ident; struct smp_ltk *ltk; u8 authenticated; __le16 ediv; __le64 rand; /* Make sure we generate only the significant amount of * bytes based on the encryption key size, and set the rest * of the value to zeroes. */ get_random_bytes(enc.ltk, smp->enc_key_size); memset(enc.ltk + smp->enc_key_size, 0, sizeof(enc.ltk) - smp->enc_key_size); get_random_bytes(&ediv, sizeof(ediv)); get_random_bytes(&rand, sizeof(rand)); smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); authenticated = hcon->sec_level == BT_SECURITY_HIGH; ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK_RESPONDER, authenticated, enc.ltk, smp->enc_key_size, ediv, rand); smp->responder_ltk = ltk; ident.ediv = ediv; ident.rand = rand; smp_send_cmd(conn, SMP_CMD_INITIATOR_IDENT, sizeof(ident), &ident); *keydist &= ~SMP_DIST_ENC_KEY; } if (*keydist & SMP_DIST_ID_KEY) { struct smp_cmd_ident_addr_info addrinfo; struct smp_cmd_ident_info idinfo; memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); /* The hci_conn contains the local identity address * after the connection has been established. * * This is true even when the connection has been * established using a resolvable random address. 
*/ bacpy(&addrinfo.bdaddr, &hcon->src); addrinfo.addr_type = hcon->src_type; smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), &addrinfo); *keydist &= ~SMP_DIST_ID_KEY; } if (*keydist & SMP_DIST_SIGN) { struct smp_cmd_sign_info sign; struct smp_csrk *csrk; /* Generate a new random key */ get_random_bytes(sign.csrk, sizeof(sign.csrk)); csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); if (csrk) { if (hcon->sec_level > BT_SECURITY_MEDIUM) csrk->type = MGMT_CSRK_LOCAL_AUTHENTICATED; else csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED; memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); } smp->responder_csrk = csrk; smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); *keydist &= ~SMP_DIST_SIGN; } /* If there are still keys to be received wait for them */ if (smp->remote_key_dist & KEY_DIST_MASK) { smp_allow_key_dist(smp); return; } set_bit(SMP_FLAG_COMPLETE, &smp->flags); smp_notify_keys(conn); smp_chan_destroy(conn); } static void smp_timeout(struct work_struct *work) { struct smp_chan *smp = container_of(work, struct smp_chan, security_timer.work); struct l2cap_conn *conn = smp->conn; bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM); } static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) { struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp; smp = kzalloc(sizeof(*smp), GFP_ATOMIC); if (!smp) return NULL; smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(smp->tfm_cmac)) { bt_dev_err(hcon->hdev, "Unable to create CMAC crypto context"); goto zfree_smp; } smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(smp->tfm_ecdh)) { bt_dev_err(hcon->hdev, "Unable to create ECDH crypto context"); goto free_shash; } smp->conn = conn; chan->data = smp; SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_FAIL); INIT_DELAYED_WORK(&smp->security_timer, smp_timeout); hci_conn_hold(hcon); return smp; free_shash: crypto_free_shash(smp->tfm_cmac); zfree_smp: kfree_sensitive(smp); return NULL; } static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16]) { struct hci_conn *hcon = smp->conn->hcon; u8 *na, *nb, a[7], b[7]; if (hcon->out) { na = smp->prnd; nb = smp->rrnd; } else { na = smp->rrnd; nb = smp->prnd; } memcpy(a, &hcon->init_addr, 6); memcpy(b, &hcon->resp_addr, 6); a[6] = hcon->init_addr_type; b[6] = hcon->resp_addr_type; return smp_f5(smp->tfm_cmac, smp->dhkey, na, nb, a, b, mackey, ltk); } static void sc_dhkey_check(struct smp_chan *smp) { struct hci_conn *hcon = smp->conn->hcon; struct smp_cmd_dhkey_check check; u8 a[7], b[7], *local_addr, *remote_addr; u8 io_cap[3], r[16]; memcpy(a, &hcon->init_addr, 6); memcpy(b, &hcon->resp_addr, 6); a[6] = hcon->init_addr_type; b[6] = hcon->resp_addr_type; if (hcon->out) { local_addr = a; remote_addr = b; memcpy(io_cap, &smp->preq[1], 3); } else { local_addr = b; remote_addr = a; memcpy(io_cap, &smp->prsp[1], 3); } memset(r, 0, sizeof(r)); if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) put_unaligned_le32(hcon->passkey_notify, r); if (smp->method == REQ_OOB) memcpy(r, smp->rr, 16); smp_f6(smp->tfm_cmac, smp->mackey, smp->prnd, smp->rrnd, r, io_cap, local_addr, remote_addr, check.e); smp_send_cmd(smp->conn, SMP_CMD_DHKEY_CHECK, sizeof(check), &check); } static u8 sc_passkey_send_confirm(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; struct smp_cmd_pairing_confirm cfm; u8 r; r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); r |= 0x80; 
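	/* Each of the 20 passkey-entry rounds commits to a single bit of the
	 * 6-digit passkey: the f4() 'z' argument is 0x80 ORed with bit
	 * 'passkey_round' of the passkey (so either 0x80 or 0x81), and
	 * sc_passkey_round() recomputes the same value when verifying the
	 * peer's confirm.
	 */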
get_random_bytes(smp->prnd, sizeof(smp->prnd)); if (smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, r, cfm.confirm_val)) return SMP_UNSPECIFIED; smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); return 0; } static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; u8 cfm[16], r; /* Ignore the PDU if we've already done 20 rounds (0 - 19) */ if (smp->passkey_round >= 20) return 0; switch (smp_op) { case SMP_CMD_PAIRING_RANDOM: r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); r |= 0x80; if (smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, smp->rrnd, r, cfm)) return SMP_UNSPECIFIED; if (crypto_memneq(smp->pcnf, cfm, 16)) return SMP_CONFIRM_FAILED; smp->passkey_round++; if (smp->passkey_round == 20) { /* Generate MacKey and LTK */ if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk)) return SMP_UNSPECIFIED; } /* The round is only complete when the initiator * receives pairing random. */ if (!hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); if (smp->passkey_round == 20) SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); else SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); return 0; } /* Start the next round */ if (smp->passkey_round != 20) return sc_passkey_round(smp, 0); /* Passkey rounds are complete - start DHKey Check */ sc_dhkey_check(smp); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); break; case SMP_CMD_PAIRING_CONFIRM: if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); return 0; } SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); if (hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); return 0; } return sc_passkey_send_confirm(smp); case SMP_CMD_PUBLIC_KEY: default: /* Initiating device starts the round */ if (!hcon->out) return 0; bt_dev_dbg(hdev, "Starting passkey round %u", smp->passkey_round + 1); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); return sc_passkey_send_confirm(smp); } return 0; } static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; u8 smp_op; clear_bit(SMP_FLAG_WAIT_USER, &smp->flags); switch (mgmt_op) { case MGMT_OP_USER_PASSKEY_NEG_REPLY: smp_failure(smp->conn, SMP_PASSKEY_ENTRY_FAILED); return 0; case MGMT_OP_USER_CONFIRM_NEG_REPLY: smp_failure(smp->conn, SMP_NUMERIC_COMP_FAILED); return 0; case MGMT_OP_USER_PASSKEY_REPLY: hcon->passkey_notify = le32_to_cpu(passkey); smp->passkey_round = 0; if (test_and_clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) smp_op = SMP_CMD_PAIRING_CONFIRM; else smp_op = 0; if (sc_passkey_round(smp, smp_op)) return -EIO; return 0; } /* Initiator sends DHKey check first */ if (hcon->out) { sc_dhkey_check(smp); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) { sc_dhkey_check(smp); sc_add_ltk(smp); } return 0; } int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) { struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_chan *chan; struct smp_chan *smp; u32 value; int err; if (!conn) return -ENOTCONN; bt_dev_dbg(conn->hcon->hdev, ""); chan = conn->smp; if (!chan) return -ENOTCONN; l2cap_chan_lock(chan); if (!chan->data) { err = -ENOTCONN; goto unlock; } smp = chan->data; if (test_bit(SMP_FLAG_SC, &smp->flags)) { err = sc_user_reply(smp, mgmt_op, passkey); goto unlock; } switch (mgmt_op) { case MGMT_OP_USER_PASSKEY_REPLY: 
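		/* Legacy pairing only (the SC case was dispatched to
		 * sc_user_reply() above): the user-entered 6-digit passkey
		 * becomes the TK, written as a 32-bit little-endian value
		 * into an otherwise zeroed 128-bit key.
		 */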
value = le32_to_cpu(passkey); memset(smp->tk, 0, sizeof(smp->tk)); bt_dev_dbg(conn->hcon->hdev, "PassKey: %u", value); put_unaligned_le32(value, smp->tk); fallthrough; case MGMT_OP_USER_CONFIRM_REPLY: set_bit(SMP_FLAG_TK_VALID, &smp->flags); break; case MGMT_OP_USER_PASSKEY_NEG_REPLY: case MGMT_OP_USER_CONFIRM_NEG_REPLY: smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); err = 0; goto unlock; default: smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); err = -EOPNOTSUPP; goto unlock; } err = 0; /* If it is our turn to send Pairing Confirm, do so now */ if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) { u8 rsp = smp_confirm(smp); if (rsp) smp_failure(conn, rsp); } unlock: l2cap_chan_unlock(chan); return err; } static void build_bredr_pairing_cmd(struct smp_chan *smp, struct smp_cmd_pairing *req, struct smp_cmd_pairing *rsp) { struct l2cap_conn *conn = smp->conn; struct hci_dev *hdev = conn->hcon->hdev; u8 local_dist = 0, remote_dist = 0; if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; } if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING)) remote_dist |= SMP_DIST_ID_KEY; if (hci_dev_test_flag(hdev, HCI_PRIVACY)) local_dist |= SMP_DIST_ID_KEY; if (!rsp) { memset(req, 0, sizeof(*req)); req->auth_req = SMP_AUTH_CT2; req->init_key_dist = local_dist; req->resp_key_dist = remote_dist; req->max_key_size = conn->hcon->enc_key_size; smp->remote_key_dist = remote_dist; return; } memset(rsp, 0, sizeof(*rsp)); rsp->auth_req = SMP_AUTH_CT2; rsp->max_key_size = conn->hcon->enc_key_size; rsp->init_key_dist = req->init_key_dist & remote_dist; rsp->resp_key_dist = req->resp_key_dist & local_dist; smp->remote_key_dist = rsp->init_key_dist; } static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing rsp, *req = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct hci_dev *hdev = conn->hcon->hdev; struct smp_chan *smp; u8 key_size, auth, sec_level; int ret; bt_dev_dbg(hdev, "conn %p", conn); if (skb->len < sizeof(*req)) return SMP_INVALID_PARAMS; if (conn->hcon->role != HCI_ROLE_SLAVE) return SMP_CMD_NOTSUPP; if (!chan->data) smp = smp_chan_create(conn); else smp = chan->data; if (!smp) return SMP_UNSPECIFIED; /* We didn't start the pairing, so match remote */ auth = req->auth_req & AUTH_REQ_MASK(hdev); if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); skb_pull(skb, sizeof(*req)); /* If the remote side's OOB flag is set it means it has * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. 
*/ if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); /* SMP over BR/EDR requires special treatment */ if (conn->hcon->type == ACL_LINK) { /* We must have a BR/EDR SC link */ if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) && !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return SMP_CROSS_TRANSP_NOT_ALLOWED; set_bit(SMP_FLAG_SC, &smp->flags); build_bredr_pairing_cmd(smp, req, &rsp); if (req->auth_req & SMP_AUTH_CT2) set_bit(SMP_FLAG_CT2, &smp->flags); key_size = min(req->max_key_size, rsp.max_key_size); if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; /* Clear bits which are generated but not distributed */ smp->remote_key_dist &= ~SMP_SC_NO_DIST; smp->prsp[0] = SMP_CMD_PAIRING_RSP; memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); smp_distribute_keys(smp); return 0; } build_pairing_cmd(conn, req, &rsp, auth); if (rsp.auth_req & SMP_AUTH_SC) { set_bit(SMP_FLAG_SC, &smp->flags); if (rsp.auth_req & SMP_AUTH_CT2) set_bit(SMP_FLAG_CT2, &smp->flags); } if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) sec_level = BT_SECURITY_MEDIUM; else sec_level = authreq_to_seclevel(auth); if (sec_level > conn->hcon->pending_sec_level) conn->hcon->pending_sec_level = sec_level; /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; method = get_auth_method(smp, conn->hcon->io_capability, req->io_capability); if (method == JUST_WORKS || method == JUST_CFM) return SMP_AUTH_REQUIREMENTS; } key_size = min(req->max_key_size, rsp.max_key_size); if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; get_random_bytes(smp->prnd, sizeof(smp->prnd)); smp->prsp[0] = SMP_CMD_PAIRING_RSP; memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); clear_bit(SMP_FLAG_INITIATOR, &smp->flags); /* Strictly speaking we shouldn't allow Pairing Confirm for the * SC case, however some implementations incorrectly copy RFU auth * req bits from our security request, which may create a false * positive SC enablement. */ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); if (test_bit(SMP_FLAG_SC, &smp->flags)) { SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); /* Clear bits which are generated but not distributed */ smp->remote_key_dist &= ~SMP_SC_NO_DIST; /* Wait for Public Key from Initiating Device */ return 0; } /* Request setup of TK */ ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); if (ret) return SMP_UNSPECIFIED; return 0; } static u8 sc_send_public_key(struct smp_chan *smp) { struct hci_dev *hdev = smp->conn->hcon->hdev; bt_dev_dbg(hdev, ""); if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { struct l2cap_chan *chan = hdev->smp_data; struct smp_dev *smp_dev; if (!chan || !chan->data) return SMP_UNSPECIFIED; smp_dev = chan->data; memcpy(smp->local_pk, smp_dev->local_pk, 64); memcpy(smp->lr, smp_dev->local_rand, 16); if (smp_dev->debug_key) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); goto done; } if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { bt_dev_dbg(hdev, "Using debug keys"); if (set_ecdh_privkey(smp->tfm_ecdh, debug_sk)) return SMP_UNSPECIFIED; memcpy(smp->local_pk, debug_pk, 64); set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); } else { while (true) { /* Generate key pair for Secure Connections */ if (generate_ecdh_keys(smp->tfm_ecdh, smp->local_pk)) return SMP_UNSPECIFIED; /* This is unlikely, but we need to check that * we didn't accidentally generate a debug key. 
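 * A collision with debug_pk would silently downgrade security, since the
 * matching private key is publicly known.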
*/ if (crypto_memneq(smp->local_pk, debug_pk, 64)) break; } } done: SMP_DBG("Local Public Key X: %32phN", smp->local_pk); SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32); smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk); return 0; } static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing *req, *rsp = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; u8 key_size, auth; int ret; bt_dev_dbg(hdev, "conn %p", conn); if (skb->len < sizeof(*rsp)) return SMP_INVALID_PARAMS; if (conn->hcon->role != HCI_ROLE_MASTER) return SMP_CMD_NOTSUPP; skb_pull(skb, sizeof(*rsp)); req = (void *) &smp->preq[1]; key_size = min(req->max_key_size, rsp->max_key_size); if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; auth = rsp->auth_req & AUTH_REQ_MASK(hdev); if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; /* If the remote side's OOB flag is set it means it has * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. */ if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); smp->prsp[0] = SMP_CMD_PAIRING_RSP; memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); /* Update remote key distribution in case the remote cleared * some bits that we had enabled in our request. */ smp->remote_key_dist &= rsp->resp_key_dist; if ((req->auth_req & SMP_AUTH_CT2) && (auth & SMP_AUTH_CT2)) set_bit(SMP_FLAG_CT2, &smp->flags); /* For BR/EDR this means we're done and can start phase 3 */ if (conn->hcon->type == ACL_LINK) { /* Clear bits which are generated but not distributed */ smp->remote_key_dist &= ~SMP_SC_NO_DIST; smp_distribute_keys(smp); return 0; } if ((req->auth_req & SMP_AUTH_SC) && (auth & SMP_AUTH_SC)) set_bit(SMP_FLAG_SC, &smp->flags); else if (conn->hcon->pending_sec_level > BT_SECURITY_HIGH) conn->hcon->pending_sec_level = BT_SECURITY_HIGH; /* If we need MITM check that it can be achieved */ if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { u8 method; method = get_auth_method(smp, req->io_capability, rsp->io_capability); if (method == JUST_WORKS || method == JUST_CFM) return SMP_AUTH_REQUIREMENTS; } get_random_bytes(smp->prnd, sizeof(smp->prnd)); /* Update remote key distribution in case the remote cleared * some bits that we had enabled in our request. 
*/ smp->remote_key_dist &= rsp->resp_key_dist; if (test_bit(SMP_FLAG_SC, &smp->flags)) { /* Clear bits which are generated but not distributed */ smp->remote_key_dist &= ~SMP_SC_NO_DIST; SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); return sc_send_public_key(smp); } auth |= req->auth_req; ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); if (ret) return SMP_UNSPECIFIED; set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); /* Can't compose response until we have been confirmed */ if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) return smp_confirm(smp); return 0; } static u8 sc_check_confirm(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; bt_dev_dbg(conn->hcon->hdev, ""); if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); if (conn->hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); } return 0; } /* Work-around for some implementations that incorrectly copy RFU bits * from our security request and thereby create the impression that * we're doing SC when in fact the remote doesn't support it. */ static int fixup_sc_false_positive(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; struct smp_cmd_pairing *req, *rsp; u8 auth; /* The issue is only observed when we're in responder role */ if (hcon->out) return SMP_UNSPECIFIED; if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { bt_dev_err(hdev, "refusing legacy fallback in SC-only mode"); return SMP_UNSPECIFIED; } bt_dev_err(hdev, "trying to fall back to legacy SMP"); req = (void *) &smp->preq[1]; rsp = (void *) &smp->prsp[1]; /* Rebuild key dist flags which may have been cleared for SC */ smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist); auth = req->auth_req & AUTH_REQ_MASK(hdev); if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) { bt_dev_err(hdev, "failed to fall back to legacy SMP"); return SMP_UNSPECIFIED; } clear_bit(SMP_FLAG_SC, &smp->flags); return 0; } static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; bt_dev_dbg(hdev, "conn %p %s", conn, hcon->out ? 
"initiator" : "responder"); if (skb->len < sizeof(smp->pcnf)) return SMP_INVALID_PARAMS; memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); skb_pull(skb, sizeof(smp->pcnf)); if (test_bit(SMP_FLAG_SC, &smp->flags)) { int ret; /* Public Key exchange must happen before any other steps */ if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) return sc_check_confirm(smp); bt_dev_err(hdev, "Unexpected SMP Pairing Confirm"); ret = fixup_sc_false_positive(smp); if (ret) return ret; } if (conn->hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); return 0; } if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) return smp_confirm(smp); set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); return 0; } static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; u8 *pkax, *pkbx, *na, *nb, confirm_hint; u32 passkey; int err; bt_dev_dbg(hcon->hdev, "conn %p", conn); if (skb->len < sizeof(smp->rrnd)) return SMP_INVALID_PARAMS; memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); skb_pull(skb, sizeof(smp->rrnd)); if (!test_bit(SMP_FLAG_SC, &smp->flags)) return smp_random(smp); if (hcon->out) { pkax = smp->local_pk; pkbx = smp->remote_pk; na = smp->prnd; nb = smp->rrnd; } else { pkax = smp->remote_pk; pkbx = smp->local_pk; na = smp->rrnd; nb = smp->prnd; } if (smp->method == REQ_OOB) { if (!hcon->out) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); goto mackey_and_ltk; } /* Passkey entry has special treatment */ if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM); if (hcon->out) { u8 cfm[16]; err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, smp->rrnd, 0, cfm); if (err) return SMP_UNSPECIFIED; if (crypto_memneq(smp->pcnf, cfm, 16)) return SMP_CONFIRM_FAILED; } else { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); /* Only Just-Works pairing requires extra checks */ if (smp->method != JUST_WORKS) goto mackey_and_ltk; /* If there already exists long term key in local host, leave * the decision to user space since the remote device could * be legitimate or malicious. */ if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role)) { /* Set passkey to 0. The value can be any number since * it'll be ignored anyway. 
*/ passkey = 0; confirm_hint = 1; goto confirm; } } mackey_and_ltk: /* Generate MacKey and LTK */ err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk); if (err) return SMP_UNSPECIFIED; if (smp->method == REQ_OOB) { if (hcon->out) { sc_dhkey_check(smp); SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); } return 0; } err = smp_g2(smp->tfm_cmac, pkax, pkbx, na, nb, &passkey); if (err) return SMP_UNSPECIFIED; confirm_hint = 0; confirm: if (smp->method == JUST_WORKS) confirm_hint = 1; err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, passkey, confirm_hint); if (err) return SMP_UNSPECIFIED; set_bit(SMP_FLAG_WAIT_USER, &smp->flags); return 0; } static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) { struct smp_ltk *key; struct hci_conn *hcon = conn->hcon; key = hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role); if (!key) return false; if (smp_ltk_sec_level(key) < sec_level) return false; if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) return true; hci_le_start_enc(hcon, key->ediv, key->rand, key->val, key->enc_size); hcon->enc_key_size = key->enc_size; /* We never store STKs for initiator role, so clear this flag */ clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags); return true; } bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, enum smp_key_pref key_pref) { if (sec_level == BT_SECURITY_LOW) return true; /* If we're encrypted with an STK but the caller prefers using * LTK claim insufficient security. This way we allow the * connection to be re-encrypted with an LTK, even if the LTK * provides the same level of security. Only exception is if we * don't have an LTK (e.g. because of key distribution bits). */ if (key_pref == SMP_USE_LTK && test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) && hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role)) return false; if (hcon->sec_level >= sec_level) return true; return false; } static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_security_req *rp = (void *) skb->data; struct smp_cmd_pairing cp; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; struct smp_chan *smp; u8 sec_level, auth; bt_dev_dbg(hdev, "conn %p", conn); if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; if (hcon->role != HCI_ROLE_MASTER) return SMP_CMD_NOTSUPP; auth = rp->auth_req & AUTH_REQ_MASK(hdev); if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) sec_level = BT_SECURITY_MEDIUM; else sec_level = authreq_to_seclevel(auth); if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) { /* If link is already encrypted with sufficient security we * still need refresh encryption as per Core Spec 5.0 Vol 3, * Part H 2.4.6 */ smp_ltk_encrypt(conn, hcon->sec_level); return 0; } if (sec_level > hcon->pending_sec_level) hcon->pending_sec_level = sec_level; if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; smp = smp_chan_create(conn); if (!smp) return SMP_UNSPECIFIED; if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; skb_pull(skb, sizeof(*rp)); memset(&cp, 0, sizeof(cp)); build_pairing_cmd(conn, &cp, NULL, auth); smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], &cp, sizeof(cp)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); return 0; } int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) { struct l2cap_conn *conn = 
hcon->l2cap_data; struct l2cap_chan *chan; struct smp_chan *smp; __u8 authreq; int ret; bt_dev_dbg(hcon->hdev, "conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level); /* This may be NULL if there's an unexpected disconnection */ if (!conn) return 1; if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) return 1; if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) return 1; if (sec_level > hcon->pending_sec_level) hcon->pending_sec_level = sec_level; if (hcon->role == HCI_ROLE_MASTER) if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; chan = conn->smp; if (!chan) { bt_dev_err(hcon->hdev, "security requested but not available"); return 1; } l2cap_chan_lock(chan); /* If SMP is already in progress ignore this request */ if (chan->data) { ret = 0; goto unlock; } smp = smp_chan_create(conn); if (!smp) { ret = 1; goto unlock; } authreq = seclevel_to_authreq(sec_level); if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED)) { authreq |= SMP_AUTH_SC; if (hci_dev_test_flag(hcon->hdev, HCI_SSP_ENABLED)) authreq |= SMP_AUTH_CT2; } /* Don't attempt to set MITM if setting is overridden by debugfs * Needed to pass certification test SM/MAS/PKE/BV-01-C */ if (!hci_dev_test_flag(hcon->hdev, HCI_FORCE_NO_MITM)) { /* Require MITM if IO Capability allows or the security level * requires it. */ if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || hcon->pending_sec_level > BT_SECURITY_MEDIUM) authreq |= SMP_AUTH_MITM; } if (hcon->role == HCI_ROLE_MASTER) { struct smp_cmd_pairing cp; build_pairing_cmd(conn, &cp, NULL, authreq); smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], &cp, sizeof(cp)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); } else { struct smp_cmd_security_req cp; cp.auth_req = authreq; smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ); } set_bit(SMP_FLAG_INITIATOR, &smp->flags); ret = 0; unlock: l2cap_chan_unlock(chan); return ret; } int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct hci_conn *hcon; struct l2cap_conn *conn; struct l2cap_chan *chan; struct smp_chan *smp; int err; err = hci_remove_ltk(hdev, bdaddr, addr_type); hci_remove_irk(hdev, bdaddr, addr_type); hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type); if (!hcon) goto done; conn = hcon->l2cap_data; if (!conn) goto done; chan = conn->smp; if (!chan) goto done; l2cap_chan_lock(chan); smp = chan->data; if (smp) { /* Set keys to NULL to make sure smp_failure() does not try to * remove and free already invalidated rcu list entries. 
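 *
 * hci_remove_ltk() and hci_remove_irk() above may already have unlinked
 * and freed those entries, so letting smp_chan_destroy() walk the stale
 * pointers would touch freed memory.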
*/ smp->ltk = NULL; smp->responder_ltk = NULL; smp->remote_irk = NULL; if (test_bit(SMP_FLAG_COMPLETE, &smp->flags)) smp_failure(conn, 0); else smp_failure(conn, SMP_UNSPECIFIED); err = 0; } l2cap_chan_unlock(chan); done: return err; } static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_encrypt_info *rp = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; /* Pairing is aborted if any blocked keys are distributed */ if (hci_is_blocked_key(conn->hcon->hdev, HCI_BLOCKED_KEY_TYPE_LTK, rp->ltk)) { bt_dev_warn_ratelimited(conn->hcon->hdev, "LTK blocked for %pMR", &conn->hcon->dst); return SMP_INVALID_PARAMS; } SMP_ALLOW_CMD(smp, SMP_CMD_INITIATOR_IDENT); skb_pull(skb, sizeof(*rp)); memcpy(smp->tk, rp->ltk, sizeof(smp->tk)); return 0; } static int smp_cmd_initiator_ident(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_initiator_ident *rp = (void *)skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; struct hci_conn *hcon = conn->hcon; struct smp_ltk *ltk; u8 authenticated; bt_dev_dbg(hdev, "conn %p", conn); if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_ENC_KEY; if (smp->remote_key_dist & SMP_DIST_ID_KEY) SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); else if (smp->remote_key_dist & SMP_DIST_SIGN) SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); skb_pull(skb, sizeof(*rp)); authenticated = (hcon->sec_level == BT_SECURITY_HIGH); ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK, authenticated, smp->tk, smp->enc_key_size, rp->ediv, rp->rand); smp->ltk = ltk; if (!(smp->remote_key_dist & KEY_DIST_MASK)) smp_distribute_keys(smp); return 0; } static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_info *info = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; bt_dev_dbg(conn->hcon->hdev, ""); if (skb->len < sizeof(*info)) return SMP_INVALID_PARAMS; /* Pairing is aborted if any blocked keys are distributed */ if (hci_is_blocked_key(conn->hcon->hdev, HCI_BLOCKED_KEY_TYPE_IRK, info->irk)) { bt_dev_warn_ratelimited(conn->hcon->hdev, "Identity key blocked for %pMR", &conn->hcon->dst); return SMP_INVALID_PARAMS; } SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_ADDR_INFO); skb_pull(skb, sizeof(*info)); memcpy(smp->irk, info->irk, 16); return 0; } static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_addr_info *info = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; bdaddr_t rpa; bt_dev_dbg(hcon->hdev, ""); if (skb->len < sizeof(*info)) return SMP_INVALID_PARAMS; /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_ID_KEY; if (smp->remote_key_dist & SMP_DIST_SIGN) SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); skb_pull(skb, sizeof(*info)); /* Strictly speaking the Core Specification (4.1) allows sending * an empty address which would force us to rely on just the IRK * as "identity information". However, since such * implementations are not known of and in order to not over * complicate our implementation, simply pretend that we never * received an IRK for such a device. 
* * The Identity Address must also be a Static Random or Public * Address, which hci_is_identity_address() checks for. */ if (!bacmp(&info->bdaddr, BDADDR_ANY) || !hci_is_identity_address(&info->bdaddr, info->addr_type)) { bt_dev_err(hcon->hdev, "ignoring IRK with no identity address"); goto distribute; } /* Drop IRK if peer is using identity address during pairing but is * providing different address as identity information. * * Microsoft Surface Precision Mouse is known to have this bug. */ if (hci_is_identity_address(&hcon->dst, hcon->dst_type) && (bacmp(&info->bdaddr, &hcon->dst) || info->addr_type != hcon->dst_type)) { bt_dev_err(hcon->hdev, "ignoring IRK with invalid identity address"); goto distribute; } bacpy(&smp->id_addr, &info->bdaddr); smp->id_addr_type = info->addr_type; if (hci_bdaddr_is_rpa(&hcon->dst, hcon->dst_type)) bacpy(&rpa, &hcon->dst); else bacpy(&rpa, BDADDR_ANY); smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr, smp->id_addr_type, smp->irk, &rpa); distribute: if (!(smp->remote_key_dist & KEY_DIST_MASK)) smp_distribute_keys(smp); return 0; } static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_sign_info *rp = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct smp_csrk *csrk; bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); if (skb->len < sizeof(*rp)) return SMP_INVALID_PARAMS; /* Mark the information as received */ smp->remote_key_dist &= ~SMP_DIST_SIGN; skb_pull(skb, sizeof(*rp)); csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); if (csrk) { if (conn->hcon->sec_level > BT_SECURITY_MEDIUM) csrk->type = MGMT_CSRK_REMOTE_AUTHENTICATED; else csrk->type = MGMT_CSRK_REMOTE_UNAUTHENTICATED; memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); } smp->csrk = csrk; smp_distribute_keys(smp); return 0; } static u8 sc_select_method(struct smp_chan *smp) { struct l2cap_conn *conn = smp->conn; struct hci_conn *hcon = conn->hcon; struct smp_cmd_pairing *local, *remote; u8 local_mitm, remote_mitm, local_io, remote_io, method; if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) || test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) return REQ_OOB; /* The preq/prsp contain the raw Pairing Request/Response PDUs * which are needed as inputs to some crypto functions. To get * the "struct smp_cmd_pairing" from them we need to skip the * first byte which contains the opcode. */ if (hcon->out) { local = (void *) &smp->preq[1]; remote = (void *) &smp->prsp[1]; } else { local = (void *) &smp->prsp[1]; remote = (void *) &smp->preq[1]; } local_io = local->io_capability; remote_io = remote->io_capability; local_mitm = (local->auth_req & SMP_AUTH_MITM); remote_mitm = (remote->auth_req & SMP_AUTH_MITM); /* If either side wants MITM, look up the method from the table, * otherwise use JUST WORKS. 
*/ if (local_mitm || remote_mitm) method = get_auth_method(smp, local_io, remote_io); else method = JUST_WORKS; /* Don't confirm locally initiated pairing attempts */ if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) method = JUST_WORKS; return method; } static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_public_key *key = (void *) skb->data; struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan = conn->smp; struct smp_chan *smp = chan->data; struct hci_dev *hdev = hcon->hdev; struct crypto_kpp *tfm_ecdh; struct smp_cmd_pairing_confirm cfm; int err; bt_dev_dbg(hdev, "conn %p", conn); if (skb->len < sizeof(*key)) return SMP_INVALID_PARAMS; /* Check if remote and local public keys are the same and debug key is * not in use. */ if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && !crypto_memneq(key, smp->local_pk, 64)) { bt_dev_err(hdev, "Remote and local public keys are identical"); return SMP_UNSPECIFIED; } memcpy(smp->remote_pk, key, 64); if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, smp->rr, 0, cfm.confirm_val); if (err) return SMP_UNSPECIFIED; if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16)) return SMP_CONFIRM_FAILED; } /* Non-initiating device sends its public key after receiving * the key from the initiating device. */ if (!hcon->out) { err = sc_send_public_key(smp); if (err) return err; } SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32); /* Compute the shared secret on the same crypto tfm on which the private * key was set/generated. */ if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { struct l2cap_chan *hchan = hdev->smp_data; struct smp_dev *smp_dev; if (!hchan || !hchan->data) return SMP_UNSPECIFIED; smp_dev = hchan->data; tfm_ecdh = smp_dev->tfm_ecdh; } else { tfm_ecdh = smp->tfm_ecdh; } if (compute_ecdh_secret(tfm_ecdh, smp->remote_pk, smp->dhkey)) return SMP_UNSPECIFIED; SMP_DBG("DHKey %32phN", smp->dhkey); set_bit(SMP_FLAG_REMOTE_PK, &smp->flags); smp->method = sc_select_method(smp); bt_dev_dbg(hdev, "selected method 0x%02x", smp->method); /* JUST_WORKS and JUST_CFM result in an unauthenticated key */ if (smp->method == JUST_WORKS || smp->method == JUST_CFM) hcon->pending_sec_level = BT_SECURITY_MEDIUM; else hcon->pending_sec_level = BT_SECURITY_FIPS; if (!crypto_memneq(debug_pk, smp->remote_pk, 64)) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); if (smp->method == DSP_PASSKEY) { get_random_bytes(&hcon->passkey_notify, sizeof(hcon->passkey_notify)); hcon->passkey_notify %= 1000000; hcon->passkey_entered = 0; smp->passkey_round = 0; if (mgmt_user_passkey_notify(hdev, &hcon->dst, hcon->type, hcon->dst_type, hcon->passkey_notify, hcon->passkey_entered)) return SMP_UNSPECIFIED; SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); return sc_passkey_round(smp, SMP_CMD_PUBLIC_KEY); } if (smp->method == REQ_OOB) { if (hcon->out) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); return 0; } if (hcon->out) SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); if (smp->method == REQ_PASSKEY) { if (mgmt_user_passkey_request(hdev, &hcon->dst, hcon->type, hcon->dst_type)) return SMP_UNSPECIFIED; SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); set_bit(SMP_FLAG_WAIT_USER, &smp->flags); return 0; } /* The Initiating device waits for the non-initiating device to * send the confirm value. 
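 *
 * So the initiator returns here and checks the peer's confirm later in
 * smp_cmd_pairing_random(), while the responder computes f4() over its
 * own public key, the peer's public key and its nonce and sends the
 * result right away.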
*/ if (conn->hcon->out) return 0; err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, 0, cfm.confirm_val); if (err) return SMP_UNSPECIFIED; smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); return 0; } static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_dhkey_check *check = (void *) skb->data; struct l2cap_chan *chan = conn->smp; struct hci_conn *hcon = conn->hcon; struct smp_chan *smp = chan->data; u8 a[7], b[7], *local_addr, *remote_addr; u8 io_cap[3], r[16], e[16]; int err; bt_dev_dbg(hcon->hdev, "conn %p", conn); if (skb->len < sizeof(*check)) return SMP_INVALID_PARAMS; memcpy(a, &hcon->init_addr, 6); memcpy(b, &hcon->resp_addr, 6); a[6] = hcon->init_addr_type; b[6] = hcon->resp_addr_type; if (hcon->out) { local_addr = a; remote_addr = b; memcpy(io_cap, &smp->prsp[1], 3); } else { local_addr = b; remote_addr = a; memcpy(io_cap, &smp->preq[1], 3); } memset(r, 0, sizeof(r)); if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) put_unaligned_le32(hcon->passkey_notify, r); else if (smp->method == REQ_OOB) memcpy(r, smp->lr, 16); err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r, io_cap, remote_addr, local_addr, e); if (err) return SMP_UNSPECIFIED; if (crypto_memneq(check->e, e, 16)) return SMP_DHKEY_CHECK_FAILED; if (!hcon->out) { if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags); return 0; } /* Responder sends DHKey check as response to initiator */ sc_dhkey_check(smp); } sc_add_ltk(smp); if (hcon->out) { hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size); hcon->enc_key_size = smp->enc_key_size; } return 0; } static int smp_cmd_keypress_notify(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_keypress_notify *kp = (void *) skb->data; bt_dev_dbg(conn->hcon->hdev, "value 0x%02x", kp->value); return 0; } static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) { struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; struct smp_chan *smp; __u8 code, reason; int err = 0; if (skb->len < 1) return -EILSEQ; if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) { reason = SMP_PAIRING_NOTSUPP; goto done; } code = skb->data[0]; skb_pull(skb, sizeof(code)); smp = chan->data; if (code > SMP_CMD_MAX) goto drop; if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) goto drop; /* If we don't have a context the only allowed commands are * pairing request and security request. 
*/ if (!smp && code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ) goto drop; switch (code) { case SMP_CMD_PAIRING_REQ: reason = smp_cmd_pairing_req(conn, skb); break; case SMP_CMD_PAIRING_FAIL: smp_failure(conn, 0); err = -EPERM; break; case SMP_CMD_PAIRING_RSP: reason = smp_cmd_pairing_rsp(conn, skb); break; case SMP_CMD_SECURITY_REQ: reason = smp_cmd_security_req(conn, skb); break; case SMP_CMD_PAIRING_CONFIRM: reason = smp_cmd_pairing_confirm(conn, skb); break; case SMP_CMD_PAIRING_RANDOM: reason = smp_cmd_pairing_random(conn, skb); break; case SMP_CMD_ENCRYPT_INFO: reason = smp_cmd_encrypt_info(conn, skb); break; case SMP_CMD_INITIATOR_IDENT: reason = smp_cmd_initiator_ident(conn, skb); break; case SMP_CMD_IDENT_INFO: reason = smp_cmd_ident_info(conn, skb); break; case SMP_CMD_IDENT_ADDR_INFO: reason = smp_cmd_ident_addr_info(conn, skb); break; case SMP_CMD_SIGN_INFO: reason = smp_cmd_sign_info(conn, skb); break; case SMP_CMD_PUBLIC_KEY: reason = smp_cmd_public_key(conn, skb); break; case SMP_CMD_DHKEY_CHECK: reason = smp_cmd_dhkey_check(conn, skb); break; case SMP_CMD_KEYPRESS_NOTIFY: reason = smp_cmd_keypress_notify(conn, skb); break; default: bt_dev_dbg(hcon->hdev, "Unknown command code 0x%2.2x", code); reason = SMP_CMD_NOTSUPP; goto done; } done: if (!err) { if (reason) smp_failure(conn, reason); kfree_skb(skb); } return err; drop: bt_dev_err(hcon->hdev, "unexpected SMP command 0x%02x from %pMR", code, &hcon->dst); kfree_skb(skb); return 0; } static void smp_teardown_cb(struct l2cap_chan *chan, int err) { struct l2cap_conn *conn = chan->conn; bt_dev_dbg(conn->hcon->hdev, "chan %p", chan); if (chan->data) smp_chan_destroy(conn); conn->smp = NULL; l2cap_chan_put(chan); } static void bredr_pairing(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; struct smp_cmd_pairing req; struct smp_chan *smp; bt_dev_dbg(hdev, "chan %p", chan); /* Only new pairings are interesting */ if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags)) return; /* Don't bother if we're not encrypted */ if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) return; /* Only initiator may initiate SMP over BR/EDR */ if (hcon->role != HCI_ROLE_MASTER) return; /* Secure Connections support must be enabled */ if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED)) return; /* BR/EDR must use Secure Connections for SMP */ if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) && !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return; /* If our LE support is not enabled don't do anything */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; /* Don't bother if remote LE support is not enabled */ if (!lmp_host_le_capable(hcon)) return; /* Remote must support SMP fixed chan for BR/EDR */ if (!(conn->remote_fixed_chan & L2CAP_FC_SMP_BREDR)) return; /* Don't bother if SMP is already ongoing */ if (chan->data) return; smp = smp_chan_create(conn); if (!smp) { bt_dev_err(hdev, "unable to create SMP context for BR/EDR"); return; } set_bit(SMP_FLAG_SC, &smp->flags); bt_dev_dbg(hdev, "starting SMP over BR/EDR"); /* Prepare and send the BR/EDR SMP Pairing Request */ build_bredr_pairing_cmd(smp, &req, NULL); smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], &req, sizeof(req)); smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req); SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); } static void smp_resume_cb(struct l2cap_chan *chan) { struct smp_chan *smp = chan->data; struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; bt_dev_dbg(hcon->hdev, 
"chan %p", chan); if (hcon->type == ACL_LINK) { bredr_pairing(chan); return; } if (!smp) return; if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) return; cancel_delayed_work(&smp->security_timer); smp_distribute_keys(smp); } static void smp_ready_cb(struct l2cap_chan *chan) { struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; bt_dev_dbg(hcon->hdev, "chan %p", chan); /* No need to call l2cap_chan_hold() here since we already own * the reference taken in smp_new_conn_cb(). This is just the * first time that we tie it to a specific pointer. The code in * l2cap_core.c ensures that there's no risk this function wont * get called if smp_new_conn_cb was previously called. */ conn->smp = chan; if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) bredr_pairing(chan); } static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { int err; bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan); err = smp_sig_channel(chan, skb); if (err) { struct smp_chan *smp = chan->data; if (smp) cancel_delayed_work_sync(&smp->security_timer); hci_disconnect(chan->conn->hcon, HCI_ERROR_AUTH_FAILURE); } return err; } static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { struct sk_buff *skb; skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); skb->priority = HCI_PRIO_MAX; bt_cb(skb)->l2cap.chan = chan; return skb; } static const struct l2cap_ops smp_chan_ops = { .name = "Security Manager", .ready = smp_ready_cb, .recv = smp_recv_cb, .alloc_skb = smp_alloc_skb_cb, .teardown = smp_teardown_cb, .resume = smp_resume_cb, .new_connection = l2cap_chan_no_new_connection, .state_change = l2cap_chan_no_state_change, .close = l2cap_chan_no_close, .defer = l2cap_chan_no_defer, .suspend = l2cap_chan_no_suspend, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, }; static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan) { struct l2cap_chan *chan; BT_DBG("pchan %p", pchan); chan = l2cap_chan_create(); if (!chan) return NULL; chan->chan_type = pchan->chan_type; chan->ops = &smp_chan_ops; chan->scid = pchan->scid; chan->dcid = chan->scid; chan->imtu = pchan->imtu; chan->omtu = pchan->omtu; chan->mode = pchan->mode; /* Other L2CAP channels may request SMP routines in order to * change the security level. This means that the SMP channel * lock must be considered in its own category to avoid lockdep * warnings. 
*/ atomic_set(&chan->nesting, L2CAP_NESTING_SMP); BT_DBG("created chan %p", chan); return chan; } static const struct l2cap_ops smp_root_chan_ops = { .name = "Security Manager Root", .new_connection = smp_new_conn_cb, /* None of these are implemented for the root channel */ .close = l2cap_chan_no_close, .alloc_skb = l2cap_chan_no_alloc_skb, .recv = l2cap_chan_no_recv, .state_change = l2cap_chan_no_state_change, .teardown = l2cap_chan_no_teardown, .ready = l2cap_chan_no_ready, .defer = l2cap_chan_no_defer, .suspend = l2cap_chan_no_suspend, .resume = l2cap_chan_no_resume, .set_shutdown = l2cap_chan_no_set_shutdown, .get_sndtimeo = l2cap_chan_no_get_sndtimeo, }; static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) { struct l2cap_chan *chan; struct smp_dev *smp; struct crypto_shash *tfm_cmac; struct crypto_kpp *tfm_ecdh; if (cid == L2CAP_CID_SMP_BREDR) { smp = NULL; goto create_chan; } smp = kzalloc(sizeof(*smp), GFP_KERNEL); if (!smp) return ERR_PTR(-ENOMEM); tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm_cmac)) { bt_dev_err(hdev, "Unable to create CMAC crypto context"); kfree_sensitive(smp); return ERR_CAST(tfm_cmac); } tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm_ecdh)) { bt_dev_err(hdev, "Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); kfree_sensitive(smp); return ERR_CAST(tfm_ecdh); } smp->local_oob = false; smp->tfm_cmac = tfm_cmac; smp->tfm_ecdh = tfm_ecdh; create_chan: chan = l2cap_chan_create(); if (!chan) { if (smp) { crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); kfree_sensitive(smp); } return ERR_PTR(-ENOMEM); } chan->data = smp; l2cap_add_scid(chan, cid); l2cap_chan_set_defaults(chan); if (cid == L2CAP_CID_SMP) { u8 bdaddr_type; hci_copy_identity_address(hdev, &chan->src, &bdaddr_type); if (bdaddr_type == ADDR_LE_DEV_PUBLIC) chan->src_type = BDADDR_LE_PUBLIC; else chan->src_type = BDADDR_LE_RANDOM; } else { bacpy(&chan->src, &hdev->bdaddr); chan->src_type = BDADDR_BREDR; } chan->state = BT_LISTEN; chan->mode = L2CAP_MODE_BASIC; chan->imtu = L2CAP_DEFAULT_MTU; chan->ops = &smp_root_chan_ops; /* Set correct nesting level for a parent/listening channel */ atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); return chan; } static void smp_del_chan(struct l2cap_chan *chan) { struct smp_dev *smp; BT_DBG("chan %p", chan); smp = chan->data; if (smp) { chan->data = NULL; crypto_free_shash(smp->tfm_cmac); crypto_free_kpp(smp->tfm_ecdh); kfree_sensitive(smp); } l2cap_chan_put(chan); } int smp_force_bredr(struct hci_dev *hdev, bool enable) { if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return -EALREADY; if (enable) { struct l2cap_chan *chan; chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); if (IS_ERR(chan)) return PTR_ERR(chan); hdev->smp_bredr_data = chan; } else { struct l2cap_chan *chan; chan = hdev->smp_bredr_data; hdev->smp_bredr_data = NULL; smp_del_chan(chan); } hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP); return 0; } int smp_register(struct hci_dev *hdev) { struct l2cap_chan *chan; bt_dev_dbg(hdev, ""); /* If the controller does not support Low Energy operation, then * there is also no need to register any SMP channel. 
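* Returning 0 in that case is intentional: running without SMP is not an error.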
*/ if (!lmp_le_capable(hdev)) return 0; if (WARN_ON(hdev->smp_data)) { chan = hdev->smp_data; hdev->smp_data = NULL; smp_del_chan(chan); } chan = smp_add_cid(hdev, L2CAP_CID_SMP); if (IS_ERR(chan)) return PTR_ERR(chan); hdev->smp_data = chan; if (!lmp_sc_capable(hdev)) { /* Flag can be already set here (due to power toggle) */ if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return 0; } if (WARN_ON(hdev->smp_bredr_data)) { chan = hdev->smp_bredr_data; hdev->smp_bredr_data = NULL; smp_del_chan(chan); } chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); if (IS_ERR(chan)) { int err = PTR_ERR(chan); chan = hdev->smp_data; hdev->smp_data = NULL; smp_del_chan(chan); return err; } hdev->smp_bredr_data = chan; return 0; } void smp_unregister(struct hci_dev *hdev) { struct l2cap_chan *chan; if (hdev->smp_bredr_data) { chan = hdev->smp_bredr_data; hdev->smp_bredr_data = NULL; smp_del_chan(chan); } if (hdev->smp_data) { chan = hdev->smp_data; hdev->smp_data = NULL; smp_del_chan(chan); } } #if IS_ENABLED(CONFIG_BT_SELFTEST_SMP) static int __init test_debug_key(struct crypto_kpp *tfm_ecdh) { u8 pk[64]; int err; err = set_ecdh_privkey(tfm_ecdh, debug_sk); if (err) return err; err = generate_ecdh_public_key(tfm_ecdh, pk); if (err) return err; if (crypto_memneq(pk, debug_pk, 64)) return -EINVAL; return 0; } static int __init test_ah(void) { const u8 irk[16] = { 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; const u8 r[3] = { 0x94, 0x81, 0x70 }; const u8 exp[3] = { 0xaa, 0xfb, 0x0d }; u8 res[3]; int err; err = smp_ah(irk, r, res); if (err) return err; if (crypto_memneq(res, exp, 3)) return -EINVAL; return 0; } static int __init test_c1(void) { const u8 k[16] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; const u8 r[16] = { 0xe0, 0x2e, 0x70, 0xc6, 0x4e, 0x27, 0x88, 0x63, 0x0e, 0x6f, 0xad, 0x56, 0x21, 0xd5, 0x83, 0x57 }; const u8 preq[7] = { 0x01, 0x01, 0x00, 0x00, 0x10, 0x07, 0x07 }; const u8 pres[7] = { 0x02, 0x03, 0x00, 0x00, 0x08, 0x00, 0x05 }; const u8 _iat = 0x01; const u8 _rat = 0x00; const bdaddr_t ra = { { 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1 } }; const bdaddr_t ia = { { 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1 } }; const u8 exp[16] = { 0x86, 0x3b, 0xf1, 0xbe, 0xc5, 0x4d, 0xa7, 0xd2, 0xea, 0x88, 0x89, 0x87, 0xef, 0x3f, 0x1e, 0x1e }; u8 res[16]; int err; err = smp_c1(k, r, preq, pres, _iat, &ia, _rat, &ra, res); if (err) return err; if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; } static int __init test_s1(void) { const u8 k[16] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; const u8 r1[16] = { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }; const u8 r2[16] = { 0x00, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99 }; const u8 exp[16] = { 0x62, 0xa0, 0x6d, 0x79, 0xae, 0x16, 0x42, 0x5b, 0x9b, 0xf4, 0xb0, 0xe8, 0xf0, 0xe1, 0x1f, 0x9a }; u8 res[16]; int err; err = smp_s1(k, r1, r2, res); if (err) return err; if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; } static int __init test_f4(struct crypto_shash *tfm_cmac) { const u8 u[32] = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 }; const u8 v[32] = { 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 }; const 
u8 x[16] = { 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; const u8 z = 0x00; const u8 exp[16] = { 0x2d, 0x87, 0x74, 0xa9, 0xbe, 0xa1, 0xed, 0xf1, 0x1c, 0xbd, 0xa9, 0x07, 0xf1, 0x16, 0xc9, 0xf2 }; u8 res[16]; int err; err = smp_f4(tfm_cmac, u, v, x, z, res); if (err) return err; if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; } static int __init test_f5(struct crypto_shash *tfm_cmac) { const u8 w[32] = { 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86, 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99, 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; const u8 n1[16] = { 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; const u8 n2[16] = { 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 }; const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 }; const u8 exp_ltk[16] = { 0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98, 0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 }; const u8 exp_mackey[16] = { 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd, 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 }; u8 mackey[16], ltk[16]; int err; err = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk); if (err) return err; if (crypto_memneq(mackey, exp_mackey, 16)) return -EINVAL; if (crypto_memneq(ltk, exp_ltk, 16)) return -EINVAL; return 0; } static int __init test_f6(struct crypto_shash *tfm_cmac) { const u8 w[16] = { 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd, 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 }; const u8 n1[16] = { 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; const u8 n2[16] = { 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; const u8 r[16] = { 0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08, 0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 }; const u8 io_cap[3] = { 0x02, 0x01, 0x01 }; const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 }; const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 }; const u8 exp[16] = { 0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2, 0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 }; u8 res[16]; int err; err = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res); if (err) return err; if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; } static int __init test_g2(struct crypto_shash *tfm_cmac) { const u8 u[32] = { 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 }; const u8 v[32] = { 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 }; const u8 x[16] = { 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; const u8 y[16] = { 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; const u32 exp_val = 0x2f9ed5ba % 1000000; u32 val; int err; err = smp_g2(tfm_cmac, u, v, x, y, &val); if (err) return err; if (val != exp_val) return -EINVAL; return 0; } static int __init test_h6(struct crypto_shash *tfm_cmac) { const u8 w[16] = { 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; 
const u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c }; const u8 exp[16] = { 0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8, 0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d }; u8 res[16]; int err; err = smp_h6(tfm_cmac, w, key_id, res); if (err) return err; if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; } static char test_smp_buffer[32]; static ssize_t test_smp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer, strlen(test_smp_buffer)); } static const struct file_operations test_smp_fops = { .open = simple_open, .read = test_smp_read, .llseek = default_llseek, }; static int __init run_selftests(struct crypto_shash *tfm_cmac, struct crypto_kpp *tfm_ecdh) { ktime_t calltime, delta, rettime; unsigned long long duration; int err; calltime = ktime_get(); err = test_debug_key(tfm_ecdh); if (err) { BT_ERR("debug_key test failed"); goto done; } err = test_ah(); if (err) { BT_ERR("smp_ah test failed"); goto done; } err = test_c1(); if (err) { BT_ERR("smp_c1 test failed"); goto done; } err = test_s1(); if (err) { BT_ERR("smp_s1 test failed"); goto done; } err = test_f4(tfm_cmac); if (err) { BT_ERR("smp_f4 test failed"); goto done; } err = test_f5(tfm_cmac); if (err) { BT_ERR("smp_f5 test failed"); goto done; } err = test_f6(tfm_cmac); if (err) { BT_ERR("smp_f6 test failed"); goto done; } err = test_g2(tfm_cmac); if (err) { BT_ERR("smp_g2 test failed"); goto done; } err = test_h6(tfm_cmac); if (err) { BT_ERR("smp_h6 test failed"); goto done; } rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; BT_INFO("SMP test passed in %llu usecs", duration); done: if (!err) snprintf(test_smp_buffer, sizeof(test_smp_buffer), "PASS (%llu usecs)\n", duration); else snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n"); debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL, &test_smp_fops); return err; } int __init bt_selftest_smp(void) { struct crypto_shash *tfm_cmac; struct crypto_kpp *tfm_ecdh; int err; tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); if (IS_ERR(tfm_cmac)) { BT_ERR("Unable to create CMAC crypto context"); return PTR_ERR(tfm_cmac); } tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); return PTR_ERR(tfm_ecdh); } err = run_selftests(tfm_cmac, tfm_ecdh); crypto_free_shash(tfm_cmac); crypto_free_kpp(tfm_ecdh); return err; } #endif
linux-master
net/bluetooth/smp.c
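A side note before the next file: smp_sig_channel() above gates every incoming PDU through smp->allow_cmd — handlers use SMP_ALLOW_CMD() to whitelist the opcode(s) they expect next, and the dispatcher consumes that permission with test_and_clear_bit(), dropping anything out of order. The sketch below is a minimal userspace illustration of that pattern, not kernel code: the opcode values follow the Bluetooth Core Specification, while the struct and helper names are invented for this example.

#include <stdint.h>
#include <stdio.h>

/* SMP opcodes as defined by the Bluetooth Core Specification. */
#define SMP_CMD_PAIRING_REQ     0x01
#define SMP_CMD_PAIRING_RSP     0x02
#define SMP_CMD_PAIRING_CONFIRM 0x03
#define SMP_CMD_PAIRING_RANDOM  0x04

/* Illustrative stand-in for struct smp_chan's allow_cmd bitmask. */
struct smp_ctx {
	uint32_t allow_cmd;
};

static void smp_allow_cmd(struct smp_ctx *smp, uint8_t code)
{
	smp->allow_cmd |= 1u << code;
}

/* Returns 0 if the PDU is accepted, -1 if it is dropped as unexpected. */
static int smp_dispatch(struct smp_ctx *smp, uint8_t code)
{
	if (!(smp->allow_cmd & (1u << code)))
		return -1;
	smp->allow_cmd &= ~(1u << code);	/* permission is one-shot */

	switch (code) {
	case SMP_CMD_PAIRING_RSP:
		/* the responder's confirm value must come next */
		smp_allow_cmd(smp, SMP_CMD_PAIRING_CONFIRM);
		break;
	case SMP_CMD_PAIRING_CONFIRM:
		smp_allow_cmd(smp, SMP_CMD_PAIRING_RANDOM);
		break;
	default:
		break;
	}
	return 0;
}

int main(void)
{
	struct smp_ctx smp = { 0 };

	smp_allow_cmd(&smp, SMP_CMD_PAIRING_RSP);
	printf("rsp:     %d\n", smp_dispatch(&smp, SMP_CMD_PAIRING_RSP));	/* 0  */
	printf("random:  %d\n", smp_dispatch(&smp, SMP_CMD_PAIRING_RANDOM));	/* -1 */
	printf("confirm: %d\n", smp_dispatch(&smp, SMP_CMD_PAIRING_CONFIRM));	/* 0  */
	return 0;
}

Compiled with any C compiler, it accepts the response, rejects the out-of-order random value, and accepts the confirm once it has been explicitly allowed — the same ordering discipline the kernel enforces per SMP context.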
/* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI event handling. */ #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "hci_debugfs.h" #include "a2mp.h" #include "amp.h" #include "smp.h" #include "msft.h" #include "eir.h" #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) /* Handle HCI Event packets */ static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, u8 ev, size_t len) { void *data; data = skb_pull_data(skb, len); if (!data) bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); return data; } static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, u16 op, size_t len) { void *data; data = skb_pull_data(skb, len); if (!data) bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); return data; } static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, u8 ev, size_t len) { void *data; data = skb_pull_data(skb, len); if (!data) bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); return data; } static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); /* It is possible that we receive Inquiry Complete event right * before we receive Inquiry Cancel Command Complete event, in * which case the latter event should have status of Command * Disallowed (0x0c). This should not be treated as error, since * we actually achieve what Inquiry Cancel wants to achieve, * which is to end the last Inquiry session. */ if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); rp->status = 0x00; } if (rp->status) return rp->status; clear_bit(HCI_INQUIRY, &hdev->flags); smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); hci_dev_lock(hdev); /* Set discovery state to stopped if we're not doing LE active * scanning. 
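* If an active LE scan is still running, discovery stays in progress until that scan completes.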
*/ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || hdev->le_scan_type != LE_SCAN_ACTIVE) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); hci_conn_check_pending(hdev); return rp->status; } static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); return rp->status; } static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); hci_conn_check_pending(hdev); return rp->status; } static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); return rp->status; } static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_role_discovery *rp = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->role = rp->role; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_link_policy *rp = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->link_policy = __le16_to_cpu(rp->policy); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_write_link_policy *rp = data; struct hci_conn *conn; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); if (!sent) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->link_policy = get_unaligned_le16(sent + 2); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_def_link_policy *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->link_policy = __le16_to_cpu(rp->policy); return rp->status; } static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); if (!sent) return rp->status; hdev->link_policy = get_unaligned_le16(sent); return rp->status; } static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); clear_bit(HCI_RESET, &hdev->flags); if (rp->status) return rp->status; /* Reset all non-persistent flags */ hci_dev_clear_volatile_flags(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hdev->inq_tx_power = HCI_TX_POWER_INVALID; hdev->adv_tx_power = HCI_TX_POWER_INVALID; memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); hdev->adv_data_len = 0; 
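/* The cached scan response payload is likewise volatile across a reset. */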
memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); hdev->scan_rsp_data_len = 0; hdev->le_scan_type = LE_SCAN_PASSIVE; hdev->ssp_debug_mode = 0; hci_bdaddr_list_clear(&hdev->le_accept_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); return rp->status; } static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_stored_link_key *rp = data; struct hci_cp_read_stored_link_key *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); if (!sent) return rp->status; if (!rp->status && sent->read_all == 0x01) { hdev->stored_max_keys = le16_to_cpu(rp->max_keys); hdev->stored_num_keys = le16_to_cpu(rp->num_keys); } return rp->status; } static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_delete_stored_link_key *rp = data; u16 num_keys; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; num_keys = le16_to_cpu(rp->num_keys); if (num_keys <= hdev->stored_num_keys) hdev->stored_num_keys -= num_keys; else hdev->stored_num_keys = 0; return rp->status; } static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); if (!sent) return rp->status; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_set_local_name_complete(hdev, sent, rp->status); else if (!rp->status) memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_name *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG)) memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); return rp->status; } static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); if (!sent) return rp->status; hci_dev_lock(hdev); if (!rp->status) { __u8 param = *((__u8 *) sent); if (param == AUTH_ENABLED) set_bit(HCI_AUTH, &hdev->flags); else clear_bit(HCI_AUTH, &hdev->flags); } if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_auth_enable_complete(hdev, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; __u8 param; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); if (!sent) return rp->status; param = *((__u8 *) sent); if (param) set_bit(HCI_ENCRYPT, &hdev->flags); else clear_bit(HCI_ENCRYPT, &hdev->flags); return rp->status; } static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; __u8 param; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); if (!sent) return rp->status; param = *((__u8 *) sent); hci_dev_lock(hdev); if (rp->status) { hdev->discov_timeout = 0; goto done; } if (param & SCAN_INQUIRY) set_bit(HCI_ISCAN, &hdev->flags); else clear_bit(HCI_ISCAN, 
&hdev->flags); if (param & SCAN_PAGE) set_bit(HCI_PSCAN, &hdev->flags); else clear_bit(HCI_PSCAN, &hdev->flags); done: hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_set_event_filter *cp; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); if (!sent) return rp->status; cp = (struct hci_cp_set_event_filter *)sent; if (cp->flt_type == HCI_FLT_CLEAR_ALL) hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); else hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); return rp->status; } static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_class_of_dev *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; memcpy(hdev->dev_class, rp->dev_class, 3); bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); return rp->status; } static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); if (!sent) return rp->status; hci_dev_lock(hdev); if (!rp->status) memcpy(hdev->dev_class, sent, 3); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_set_class_of_dev_complete(hdev, sent, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_voice_setting *rp = data; __u16 setting; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; setting = __le16_to_cpu(rp->voice_setting); if (hdev->voice_setting == setting) return rp->status; hdev->voice_setting = setting; bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); return rp->status; } static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; __u16 setting; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); if (!sent) return rp->status; setting = get_unaligned_le16(sent); if (hdev->voice_setting == setting) return rp->status; hdev->voice_setting = setting; bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); return rp->status; } static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_num_supported_iac *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->num_iac = rp->num_iac; bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); return rp->status; } static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_write_ssp_mode *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); if (!sent) return rp->status; hci_dev_lock(hdev); if (!rp->status) { if (sent->mode) hdev->features[1][0] |= LMP_HOST_SSP; else hdev->features[1][0] &= ~LMP_HOST_SSP; } if (!rp->status) { if (sent->mode) hci_dev_set_flag(hdev, HCI_SSP_ENABLED); else hci_dev_clear_flag(hdev, 
HCI_SSP_ENABLED); } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_write_sc_support *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); if (!sent) return rp->status; hci_dev_lock(hdev); if (!rp->status) { if (sent->support) hdev->features[1][0] |= LMP_HOST_SC; else hdev->features[1][0] &= ~LMP_HOST_SC; } if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { if (sent->support) hci_dev_set_flag(hdev, HCI_SC_ENABLED); else hci_dev_clear_flag(hdev, HCI_SC_ENABLED); } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_version *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG)) { hdev->hci_ver = rp->hci_ver; hdev->hci_rev = __le16_to_cpu(rp->hci_rev); hdev->lmp_ver = rp->lmp_ver; hdev->manufacturer = __le16_to_cpu(rp->manufacturer); hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); } return rp->status; } static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_enc_key_size *rp = data; struct hci_conn *conn; u16 handle; u8 status = rp->status; bt_dev_dbg(hdev, "status 0x%2.2x", status); handle = le16_to_cpu(rp->handle); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) { status = 0xFF; goto done; } /* While unexpected, the read_enc_key_size command may fail. The most * secure approach is to then assume the key size is 0 to force a * disconnection. */ if (status) { bt_dev_err(hdev, "failed to read key size for handle %u", handle); conn->enc_key_size = 0; } else { conn->enc_key_size = rp->key_size; status = 0; } hci_encrypt_cfm(conn, 0); done: hci_dev_unlock(hdev); return status; } static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_commands *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG)) memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); return rp->status; } static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_auth_payload_to *rp = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_write_auth_payload_to *rp = data; struct hci_conn *conn; void *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); if (!sent) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (!conn) { rp->status = 0xff; goto unlock; } if (!rp->status) conn->auth_payload_timeout = get_unaligned_le16(sent + 2); hci_encrypt_cfm(conn, 0); unlock: hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, struct sk_buff *skb) 
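/* Cache the controller's LMP feature bits and derive the ACL and eSCO packet types it can use. */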
{ struct hci_rp_read_local_features *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; memcpy(hdev->features, rp->features, 8); /* Adjust default settings according to features * supported by device. */ if (hdev->features[0][0] & LMP_3SLOT) hdev->pkt_type |= (HCI_DM3 | HCI_DH3); if (hdev->features[0][0] & LMP_5SLOT) hdev->pkt_type |= (HCI_DM5 | HCI_DH5); if (hdev->features[0][1] & LMP_HV2) { hdev->pkt_type |= (HCI_HV2); hdev->esco_type |= (ESCO_HV2); } if (hdev->features[0][1] & LMP_HV3) { hdev->pkt_type |= (HCI_HV3); hdev->esco_type |= (ESCO_HV3); } if (lmp_esco_capable(hdev)) hdev->esco_type |= (ESCO_EV3); if (hdev->features[0][4] & LMP_EV4) hdev->esco_type |= (ESCO_EV4); if (hdev->features[0][4] & LMP_EV5) hdev->esco_type |= (ESCO_EV5); if (hdev->features[0][5] & LMP_EDR_ESCO_2M) hdev->esco_type |= (ESCO_2EV3); if (hdev->features[0][5] & LMP_EDR_ESCO_3M) hdev->esco_type |= (ESCO_3EV3); if (hdev->features[0][5] & LMP_EDR_3S_ESCO) hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); return rp->status; } static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_ext_features *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (hdev->max_page < rp->max_page) { if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, &hdev->quirks)) bt_dev_warn(hdev, "broken local ext features page 2"); else hdev->max_page = rp->max_page; } if (rp->page < HCI_MAX_PAGES) memcpy(hdev->features[rp->page], rp->features, 8); return rp->status; } static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_flow_control_mode *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->flow_ctl_mode = rp->mode; return rp->status; } static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_buffer_size *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); hdev->sco_mtu = rp->sco_mtu; hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { hdev->sco_mtu = 64; hdev->sco_pkts = 8; } hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); return rp->status; } static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_bd_addr *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (test_bit(HCI_INIT, &hdev->flags)) bacpy(&hdev->bdaddr, &rp->bdaddr); if (hci_dev_test_flag(hdev, HCI_SETUP)) bacpy(&hdev->setup_addr, &rp->bdaddr); return rp->status; } static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_pairing_opts *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG)) { hdev->pairing_opts = rp->pairing_opts; hdev->max_enc_key_size = rp->max_key_size; } return rp->status; } static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_page_scan_activity *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", 
rp->status); if (rp->status) return rp->status; if (test_bit(HCI_INIT, &hdev->flags)) { hdev->page_scan_interval = __le16_to_cpu(rp->interval); hdev->page_scan_window = __le16_to_cpu(rp->window); } return rp->status; } static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_write_page_scan_activity *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); if (!sent) return rp->status; hdev->page_scan_interval = __le16_to_cpu(sent->interval); hdev->page_scan_window = __le16_to_cpu(sent->window); return rp->status; } static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_page_scan_type *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (test_bit(HCI_INIT, &hdev->flags)) hdev->page_scan_type = rp->type; return rp->status; } static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; u8 *type; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); if (type) hdev->page_scan_type = *type; return rp->status; } static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_data_block_size *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); hdev->block_len = __le16_to_cpu(rp->block_len); hdev->num_blocks = __le16_to_cpu(rp->num_blocks); hdev->block_cnt = hdev->num_blocks; BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, hdev->block_cnt, hdev->block_len); return rp->status; } static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_clock *rp = data; struct hci_cp_read_clock *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); if (!cp) goto unlock; if (cp->which == 0x00) { hdev->clock = le32_to_cpu(rp->clock); goto unlock; } conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) { conn->clock = le32_to_cpu(rp->clock); conn->clock_accuracy = le16_to_cpu(rp->accuracy); } unlock: hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_amp_info *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->amp_status = rp->amp_status; hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); hdev->amp_type = rp->amp_type; hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); return rp->status; } static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_inq_rsp_tx_power *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->inq_tx_power = rp->tx_power; return 
rp->status; } static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_def_err_data_reporting *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->err_data_reporting = rp->err_data_reporting; return rp->status; } static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_write_def_err_data_reporting *cp; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); if (!cp) return rp->status; hdev->err_data_reporting = cp->err_data_reporting; return rp->status; } static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_pin_code_reply *rp = data; struct hci_cp_pin_code_reply *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); if (rp->status) goto unlock; cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); if (!cp) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) conn->pin_length = cp->pin_len; unlock: hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_pin_code_neg_reply *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_buffer_size *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->le_mtu = __le16_to_cpu(rp->le_mtu); hdev->le_pkts = rp->le_max_pkt; hdev->le_cnt = hdev->le_pkts; BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); return rp->status; } static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_local_features *rp = data; BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); if (rp->status) return rp->status; memcpy(hdev->le_features, rp->features, 8); return rp->status; } static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_adv_tx_power *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->adv_tx_power = rp->tx_power; return rp->status; } static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { 
struct hci_rp_user_confirm_reply *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_oob_data *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); return rp->status; } static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_local_oob_ext_data *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); return rp->status; } static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bdaddr_t *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); if (!sent) return rp->status; hci_dev_lock(hdev); bacpy(&hdev->random_addr, sent); if (!bacmp(&hdev->rpa, sent)) { hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, secs_to_jiffies(hdev->rpa_timeout)); } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_default_phy *cp; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); if (!cp) return rp->status; hci_dev_lock(hdev); hdev->le_tx_def_phys = cp->tx_phys; hdev->le_rx_def_phys = cp->rx_phys; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_adv_set_rand_addr *cp; struct adv_info *adv; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); /* Update only in case the adv instance since handle 0x00 shall be using * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and * non-extended adverting. 
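* In short, handle 0x00 is skipped here; its random address is tracked by the HCI_OP_LE_SET_RANDOM_ADDR completion handler instead.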
*/ if (!cp || !cp->handle) return rp->status; hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, cp->handle); if (adv) { bacpy(&adv->random_addr, &cp->bdaddr); if (!bacmp(&hdev->rpa, &cp->bdaddr)) { adv->rpa_expired = false; queue_delayed_work(hdev->workqueue, &adv->rpa_expired_cb, secs_to_jiffies(hdev->rpa_timeout)); } } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; u8 *instance; int err; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); if (!instance) return rp->status; hci_dev_lock(hdev); err = hci_remove_adv_instance(hdev, *instance); if (!err) mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, *instance); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct adv_info *adv, *n; int err; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) return rp->status; hci_dev_lock(hdev); list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { u8 instance = adv->instance; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, instance); } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_transmit_power *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->min_le_tx_power = rp->min_le_tx_power; hdev->max_le_tx_power = rp->max_le_tx_power; return rp->status; } static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_privacy_mode *cp; struct hci_conn_params *params; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); if (!cp) return rp->status; hci_dev_lock(hdev); params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); if (params) WRITE_ONCE(params->privacy_mode, cp->mode); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; __u8 *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); if (!sent) return rp->status; hci_dev_lock(hdev); /* If we're doing connection initiation as peripheral. Set a * timeout in case something goes wrong. 
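* The timeout is armed on the pending LE connection looked up just below.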
*/ if (*sent) { struct hci_conn *conn; hci_dev_set_flag(hdev, HCI_LE_ADV); conn = hci_lookup_le_connect(hdev); if (conn) queue_delayed_work(hdev->workqueue, &conn->le_conn_timeout, conn->conn_timeout); } else { hci_dev_clear_flag(hdev, HCI_LE_ADV); } hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *set; struct adv_info *adv = NULL, *n; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); if (!cp) return rp->status; set = (void *)cp->data; hci_dev_lock(hdev); if (cp->num_of_sets) adv = hci_find_adv_instance(hdev, set->handle); if (cp->enable) { struct hci_conn *conn; hci_dev_set_flag(hdev, HCI_LE_ADV); if (adv && !adv->periodic) adv->enabled = true; conn = hci_lookup_le_connect(hdev); if (conn) queue_delayed_work(hdev->workqueue, &conn->le_conn_timeout, conn->conn_timeout); } else { if (cp->num_of_sets) { if (adv) adv->enabled = false; /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_ADV */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->enabled) goto unlock; } } else { /* All instances shall be considered disabled */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) adv->enabled = false; } hci_dev_clear_flag(hdev, HCI_LE_ADV); } unlock: hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_scan_param *cp; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); if (!cp) return rp->status; hci_dev_lock(hdev); hdev->le_scan_type = cp->type; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_ext_scan_params *cp; struct hci_ev_status *rp = data; struct hci_cp_le_scan_phy_params *phy_param; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); if (!cp) return rp->status; phy_param = (void *)cp->data; hci_dev_lock(hdev); hdev->le_scan_type = phy_param->type; hci_dev_unlock(hdev); return rp->status; } static bool has_pending_adv_report(struct hci_dev *hdev) { struct discovery_state *d = &hdev->discovery; return bacmp(&d->last_adv_addr, BDADDR_ANY); } static void clear_pending_adv_report(struct hci_dev *hdev) { struct discovery_state *d = &hdev->discovery; bacpy(&d->last_adv_addr, BDADDR_ANY); d->last_adv_data_len = 0; } static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, s8 rssi, u32 flags, u8 *data, u8 len) { struct discovery_state *d = &hdev->discovery; if (len > max_adv_len(hdev)) return; bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; d->last_adv_flags = flags; memcpy(d->last_adv_data, data, len); d->last_adv_data_len = len; } static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) { hci_dev_lock(hdev); switch (enable) { case LE_SCAN_ENABLE: hci_dev_set_flag(hdev, HCI_LE_SCAN); if (hdev->le_scan_type == LE_SCAN_ACTIVE) clear_pending_adv_report(hdev); if (hci_dev_test_flag(hdev, HCI_MESH)) 
hci_discovery_set_state(hdev, DISCOVERY_FINDING); break; case LE_SCAN_DISABLE: /* We do this here instead of when setting DISCOVERY_STOPPED * since the latter would potentially require waiting for * inquiry to stop too. */ if (has_pending_adv_report(hdev)) { struct discovery_state *d = &hdev->discovery; mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, d->last_adv_rssi, d->last_adv_flags, d->last_adv_data, d->last_adv_data_len, NULL, 0, 0); } /* Cancel this timer so that we don't try to disable scanning * when it's already disabled. */ cancel_delayed_work(&hdev->le_scan_disable); hci_dev_clear_flag(hdev, HCI_LE_SCAN); /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we * interrupted scanning due to a connect request. Mark * therefore discovery as stopped. */ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && hdev->discovery.state == DISCOVERY_FINDING) queue_work(hdev->workqueue, &hdev->reenable_adv_work); break; default: bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", enable); break; } hci_dev_unlock(hdev); } static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_scan_enable *cp; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); if (!cp) return rp->status; le_set_scan_enable_complete(hdev, cp->enable); return rp->status; } static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_ext_scan_enable *cp; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); if (!cp) return rp->status; le_set_scan_enable_complete(hdev, cp->enable); return rp->status; } static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_num_supported_adv_sets *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, rp->num_of_sets); if (rp->status) return rp->status; hdev->le_num_of_adv_sets = rp->num_of_sets; return rp->status; } static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_accept_list_size *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); if (rp->status) return rp->status; hdev->le_accept_list_size = rp->size; return rp->status; } static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->le_accept_list); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_add_to_accept_list *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); if (!sent) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct 
hci_cp_le_del_from_accept_list *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); if (!sent) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, sent->bdaddr_type); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_supported_states *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; memcpy(hdev->le_states, rp->le_states, 8); return rp->status; } static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_def_data_len *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); return rp->status; } static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_write_def_data_len *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); if (!sent) return rp->status; hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); return rp->status; } static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_add_to_resolv_list *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); if (!sent) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type, sent->peer_irk, sent->local_irk); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_del_from_resolv_list *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); if (!sent) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, sent->bdaddr_type); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_resolv_list_size *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); if (rp->status) return rp->status; hdev->le_resolv_list_size = rp->size; return rp->status; } static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; __u8 *sent; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); if (!sent) return rp->status; hci_dev_lock(hdev); if (*sent) hci_dev_set_flag(hdev, 
HCI_LL_RPA_RESOLUTION); else hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_max_data_len *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); return rp->status; } static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_write_le_host_supported *sent; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); if (!sent) return rp->status; hci_dev_lock(hdev); if (sent->le) { hdev->features[1][0] |= LMP_HOST_LE; hci_dev_set_flag(hdev, HCI_LE_ENABLED); } else { hdev->features[1][0] &= ~LMP_HOST_LE; hci_dev_clear_flag(hdev, HCI_LE_ENABLED); hci_dev_clear_flag(hdev, HCI_ADVERTISING); } if (sent->simul) hdev->features[1][0] |= LMP_HOST_LE_BREDR; else hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_le_set_adv_param *cp; struct hci_ev_status *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); if (!cp) return rp->status; hci_dev_lock(hdev); hdev->adv_addr_type = cp->own_address_type; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_set_ext_adv_params *rp = data; struct hci_cp_le_set_ext_adv_params *cp; struct adv_info *adv_instance; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); if (!cp) return rp->status; hci_dev_lock(hdev); hdev->adv_addr_type = cp->own_addr_type; if (!cp->handle) { /* Store in hdev for instance 0 */ hdev->adv_tx_power = rp->tx_power; } else { adv_instance = hci_find_adv_instance(hdev, cp->handle); if (adv_instance) adv_instance->tx_power = rp->tx_power; } /* Update adv data as tx power is known now */ hci_update_adv_data(hdev, cp->handle); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_read_rssi *rp = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (conn) conn->rssi = rp->rssi; hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_cp_read_tx_power *sent; struct hci_rp_read_tx_power *rp = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); if (!sent) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); if (!conn) goto unlock; switch (sent->type) { case 0x00: conn->tx_power = rp->tx_power; break; case 0x01: conn->max_tx_power = rp->tx_power; break; } unlock: hci_dev_unlock(hdev); return 
rp->status; } static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; u8 *mode; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); if (mode) hdev->ssp_debug_mode = *mode; return rp->status; } static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) { bt_dev_dbg(hdev, "status 0x%2.2x", status); if (status) { hci_conn_check_pending(hdev); return; } set_bit(HCI_INQUIRY, &hdev->flags); } static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_create_conn *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); if (status) { if (conn && conn->state == BT_CONNECT) { if (status != 0x0c || conn->attempt > 2) { conn->state = BT_CLOSED; hci_connect_cfm(conn, status); hci_conn_del(conn); } else conn->state = BT_CONNECT2; } } else { if (!conn) { conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, HCI_ROLE_MASTER); if (!conn) bt_dev_err(hdev, "no memory for new connection"); } } hci_dev_unlock(hdev); } static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) { struct hci_cp_add_sco *cp; struct hci_conn *acl; struct hci_link *link; __u16 handle; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); if (!cp) return; handle = __le16_to_cpu(cp->handle); bt_dev_dbg(hdev, "handle 0x%4.4x", handle); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { link = list_first_entry_or_null(&acl->link_list, struct hci_link, list); if (link && link->conn) { link->conn->state = BT_CLOSED; hci_connect_cfm(link->conn, status); hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) { struct hci_cp_auth_requested *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) { struct hci_cp_set_conn_encrypt *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static int hci_outgoing_auth_needed(struct hci_dev *hdev, struct hci_conn *conn) { if (conn->state != BT_CONFIG || !conn->out) return 0; if (conn->pending_sec_level == BT_SECURITY_SDP) return 0; /* Only request authentication for SSP connections or non-SSP * devices with sec_level MEDIUM or HIGH or if MITM protection * is requested. 
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we have already spent too much time resolving
	 * names.
	 */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names are
	 * pending, there is no need to resolve the next name here: that will
	 * be done when another Remote Name Request Complete event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ?
NAME_KNOWN : NAME_NOT_KNOWN; mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, name, name_len); if (hci_resolve_next_name(hdev)) return; discov_complete: hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) { struct hci_cp_remote_name_req *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* If successful wait for the name req complete event before * checking for the need to do authentication */ if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (hci_dev_test_flag(hdev, HCI_MGMT)) hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); if (!conn) goto unlock; if (!hci_outgoing_auth_needed(hdev, conn)) goto unlock; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested auth_cp; set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); auth_cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(auth_cp), &auth_cp); } unlock: hci_dev_unlock(hdev); } static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) { struct hci_cp_read_remote_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) { struct hci_cp_read_remote_ext_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, __u8 status) { struct hci_conn *acl; struct hci_link *link; bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { link = list_first_entry_or_null(&acl->link_list, struct hci_link, list); if (link && link->conn) { link->conn->state = BT_CLOSED; hci_connect_cfm(link->conn, status); hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); if (!cp) return; hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_enhanced_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); if (!cp) return; hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) { struct hci_cp_sniff_mode *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); if (!cp) return; 
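/* The sniff mode request failed: clear the pending mode-change flag on the affected connection and, if a SCO setup was waiting on that mode change, run it now so it observes the failure status. */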
hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, status); } hci_dev_unlock(hdev); } static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) { struct hci_cp_exit_sniff_mode *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, status); } hci_dev_unlock(hdev); } static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) { struct hci_cp_disconnect *cp; struct hci_conn_params *params; struct hci_conn *conn; bool mgmt_conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended * otherwise cleanup the connection immediately. */ if (!status && !hdev->suspended) return; cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (status) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, status); if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_enable_advertising(hdev); } /* Inform sockets conn is gone before we delete it */ hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); goto done; } mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (conn->type == ACL_LINK) { if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { switch (params->auto_connect) { case HCI_AUTO_CONN_LINK_LOSS: if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) break; fallthrough; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_del_init(params); hci_pend_le_list_add(params, &hdev->pend_le_conns); break; default: break; } } mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, cp->reason, mgmt_conn); hci_disconn_cfm(conn, cp->reason); done: /* If the disconnection failed for any reason, the upper layer * does not retry to disconnect in current implementation. * Hence, we need to do some basic cleanup here and re-enable * advertising if necessary. */ hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) { /* When using controller based address resolution, then the new * address types 0x02 and 0x03 are used. 
These types need to be * converted back into either public address or random address type */ switch (type) { case ADDR_LE_DEV_PUBLIC_RESOLVED: if (resolved) *resolved = true; return ADDR_LE_DEV_PUBLIC; case ADDR_LE_DEV_RANDOM_RESOLVED: if (resolved) *resolved = true; return ADDR_LE_DEV_RANDOM; } if (resolved) *resolved = false; return type; } static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, u8 peer_addr_type, u8 own_address_type, u8 filter_policy) { struct hci_conn *conn; conn = hci_conn_hash_lookup_le(hdev, peer_addr, peer_addr_type); if (!conn) return; own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); /* Store the initiator and responder address information which * is needed for SMP. These values will not change during the * lifetime of the connection. */ conn->init_addr_type = own_address_type; if (own_address_type == ADDR_LE_DEV_RANDOM) bacpy(&conn->init_addr, &hdev->random_addr); else bacpy(&conn->init_addr, &hdev->bdaddr); conn->resp_addr_type = peer_addr_type; bacpy(&conn->resp_addr, peer_addr); } static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_create_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. */ if (status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, cp->own_address_type, cp->filter_policy); hci_dev_unlock(hdev); } static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) { struct hci_cp_le_ext_create_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. 
*/ if (status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); if (!cp) return; hci_dev_lock(hdev); cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, cp->own_addr_type, cp->filter_policy); hci_dev_unlock(hdev); } static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) { struct hci_cp_le_read_remote_features *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (conn) { if (conn->state == BT_CONFIG) { hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) { struct hci_cp_le_start_enc *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; hci_dev_lock(hdev); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); if (!cp) goto unlock; conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (conn->state != BT_CONNECTED) goto unlock; hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); unlock: hci_dev_unlock(hdev); } static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) { struct hci_cp_switch_role *cp; struct hci_conn *conn; BT_DBG("%s status 0x%2.2x", hdev->name, status); if (!status) return; cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); if (!cp) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); hci_dev_unlock(hdev); } static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *ev = data; struct discovery_state *discov = &hdev->discovery; struct inquiry_entry *e; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_conn_check_pending(hdev); if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) return; smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); if (discov->state != DISCOVERY_FINDING) goto unlock; if (list_empty(&discov->resolve)) { /* When BR/EDR inquiry is active and no LE scanning is in * progress, then change discovery state to indicate completion. * * When running LE scanning and BR/EDR inquiry simultaneously * and the LE scan already finished, then change the discovery * state to indicate completion. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); goto unlock; } e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); if (e && hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; } else { /* When BR/EDR inquiry is active and no LE scanning is in * progress, then change discovery state to indicate completion. * * When running LE scanning and BR/EDR inquiry simultaneously * and the LE scan already finished, then change the discovery * state to indicate completion. 
*/ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } unlock: hci_dev_unlock(hdev); } static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_inquiry_result *ev = edata; struct inquiry_data data; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, flex_array_size(ev, info, ev->num))) return; bt_dev_dbg(hdev, "num %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct inquiry_info *info = &ev->info[i]; u32 flags; bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = HCI_RSSI_INVALID; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, HCI_RSSI_INVALID, flags, NULL, 0, NULL, 0, 0); } hci_dev_unlock(hdev); } static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = data; struct hci_conn *conn; u8 status = ev->status; bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { /* In case of error status and there is no connection pending * just unlock as there is nothing to cleanup. */ if (ev->status) goto unlock; /* Connection may not exist if auto-connected. Check the bredr * allowlist to see if this device is allowed to auto connect. * If link is an ACL type, create a connection class * automatically. * * Auto-connect will only occur if the event filter is * programmed with a given address. Right now, event filter is * only used during suspend. */ if (ev->link_type == ACL_LINK && hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); if (!conn) { bt_dev_err(hdev, "no memory for new conn"); goto unlock; } } else { if (ev->link_type != SCO_LINK) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); if (!conn) goto unlock; conn->type = SCO_LINK; } } /* The HCI_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. 
*/ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); goto unlock; } if (!status) { status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); if (status) goto done; if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; hci_conn_hold(conn); if (!conn->out && !hci_conn_ssp_enabled(conn) && !hci_find_link_key(hdev, &ev->bdaddr)) conn->disc_timeout = HCI_PAIRING_TIMEOUT; else conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); if (test_bit(HCI_AUTH, &hdev->flags)) set_bit(HCI_CONN_AUTH, &conn->flags); if (test_bit(HCI_ENCRYPT, &hdev->flags)) set_bit(HCI_CONN_ENCRYPT, &conn->flags); /* Get remote features */ if (conn->type == ACL_LINK) { struct hci_cp_read_remote_features cp; cp.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); hci_update_scan(hdev); } /* Set packet type for incoming connection */ if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { struct hci_cp_change_conn_ptype cp; cp.handle = ev->handle; cp.pkt_type = cpu_to_le16(conn->pkt_type); hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp); } } if (conn->type == ACL_LINK) hci_sco_setup(conn, ev->status); done: if (status) { hci_conn_failed(conn, status); } else if (ev->link_type == SCO_LINK) { switch (conn->setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_CVSD: if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); break; } hci_connect_cfm(conn, status); } unlock: hci_dev_unlock(hdev); hci_conn_check_pending(hdev); } static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct hci_cp_reject_conn_req cp; bacpy(&cp.bdaddr, bdaddr); cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); } static void hci_conn_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_conn_request *ev = data; int mask = hdev->link_mode; struct inquiry_entry *ie; struct hci_conn *conn; __u8 flags = 0; bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_reject_conn(hdev, &ev->bdaddr); return; } hci_dev_lock(hdev); if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); goto unlock; } /* Require HCI_CONNECTABLE or an accept list entry to accept the * connection. These features are only touched through mgmt so * only do the checks if HCI_MGMT is set. 
*/ if (hci_dev_test_flag(hdev, HCI_MGMT) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); goto unlock; } /* Connection accepted */ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) memcpy(ie->data.dev_class, ev->dev_class, 3); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); if (!conn) { bt_dev_err(hdev, "no memory for new connection"); goto unlock; } } memcpy(conn->dev_class, ev->dev_class, 3); hci_dev_unlock(hdev); if (ev->link_type == ACL_LINK || (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { struct hci_cp_accept_conn_req cp; conn->state = BT_CONNECT; bacpy(&cp.bdaddr, &ev->bdaddr); if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) cp.role = 0x00; /* Become central */ else cp.role = 0x01; /* Remain peripheral */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else if (!(flags & HCI_PROTO_DEFER)) { struct hci_cp_accept_sync_conn_req cp; conn->state = BT_CONNECT; bacpy(&cp.bdaddr, &ev->bdaddr); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.max_latency = cpu_to_le16(0xffff); cp.content_format = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } else { conn->state = BT_CONNECT2; hci_connect_cfm(conn, 0); } return; unlock: hci_dev_unlock(hdev); } static u8 hci_to_mgmt_reason(u8 err) { switch (err) { case HCI_ERROR_CONNECTION_TIMEOUT: return MGMT_DEV_DISCONN_TIMEOUT; case HCI_ERROR_REMOTE_USER_TERM: case HCI_ERROR_REMOTE_LOW_RESOURCES: case HCI_ERROR_REMOTE_POWER_OFF: return MGMT_DEV_DISCONN_REMOTE; case HCI_ERROR_LOCAL_HOST_TERM: return MGMT_DEV_DISCONN_LOCAL_HOST; default: return MGMT_DEV_DISCONN_UNKNOWN; } } static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_disconn_complete *ev = data; u8 reason; struct hci_conn_params *params; struct hci_conn *conn; bool mgmt_connected; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (ev->status) { mgmt_disconnect_failed(hdev, &conn->dst, conn->type, conn->dst_type, ev->status); goto unlock; } conn->state = BT_CLOSED; mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) reason = MGMT_DEV_DISCONN_AUTH_FAILURE; else reason = hci_to_mgmt_reason(ev->reason); mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, reason, mgmt_connected); if (conn->type == ACL_LINK) { if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) hci_remove_link_key(hdev, &conn->dst); hci_update_scan(hdev); } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { switch (params->auto_connect) { case HCI_AUTO_CONN_LINK_LOSS: if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) break; fallthrough; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: hci_pend_le_list_del_init(params); hci_pend_le_list_add(params, &hdev->pend_le_conns); hci_update_passive_scan(hdev); break; default: break; } } hci_disconn_cfm(conn, ev->reason); /* Re-enable advertising if necessary, since it might * have been disabled by the connection. 
From the * HCI_LE_Set_Advertise_Enable command description in * the core specification (v4.0): * "The Controller shall continue advertising until the Host * issues an LE_Set_Advertise_Enable command with * Advertising_Enable set to 0x00 (Advertising is disabled) * or until a connection is created or until the Advertising * is timed out due to Directed Advertising." */ if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { hdev->cur_adv_instance = conn->adv_instance; hci_enable_advertising(hdev); } hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_auth_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) { clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); if (!hci_conn_ssp_enabled(conn) && test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { bt_dev_info(hdev, "re-auth of legacy device is not possible."); } else { set_bit(HCI_CONN_AUTH, &conn->flags); conn->sec_level = conn->pending_sec_level; } } else { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); mgmt_auth_failed(conn, ev->status); } clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); if (conn->state == BT_CONFIG) { if (!ev->status && hci_conn_ssp_enabled(conn)) { struct hci_cp_set_conn_encrypt cp; cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } else { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } } else { hci_auth_cfm(conn, ev->status); hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); } if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { if (!ev->status) { struct hci_cp_set_conn_encrypt cp; cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); hci_encrypt_cfm(conn, ev->status); } } unlock: hci_dev_unlock(hdev); } static void hci_remote_name_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_name *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_conn_check_pending(hdev); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto check_auth; if (ev->status == 0) hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, strnlen(ev->name, HCI_MAX_NAME_LENGTH)); else hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); check_auth: if (!conn) goto unlock; if (!hci_outgoing_auth_needed(hdev, conn)) goto unlock; if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested cp; set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_encrypt_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) { if (ev->encrypt) { /* Encryption implies authentication */ set_bit(HCI_CONN_AUTH, &conn->flags); 
set_bit(HCI_CONN_ENCRYPT, &conn->flags); conn->sec_level = conn->pending_sec_level; /* P-256 authentication key implies FIPS */ if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) set_bit(HCI_CONN_FIPS, &conn->flags); if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || conn->type == LE_LINK) set_bit(HCI_CONN_AES_CCM, &conn->flags); } else { clear_bit(HCI_CONN_ENCRYPT, &conn->flags); clear_bit(HCI_CONN_AES_CCM, &conn->flags); } } /* We should disregard the current RPA and generate a new one * whenever the encryption procedure fails. */ if (ev->status && conn->type == LE_LINK) { hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); } clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); /* Check link security requirements are met */ if (!hci_conn_check_link_mode(conn)) ev->status = HCI_ERROR_AUTH_FAILURE; if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); /* Notify upper layers so they can cleanup before * disconnecting. */ hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; /* Only send HCI_Read_Encryption_Key_Size if the * controller really supports it. If it doesn't, assume * the default size (16). */ if (!(hdev->commands[20] & 0x10)) { conn->enc_key_size = HCI_LINK_KEY_SIZE; goto notify; } cp.handle = cpu_to_le16(conn->handle); if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp)) { bt_dev_err(hdev, "sending read key size failed"); conn->enc_key_size = HCI_LINK_KEY_SIZE; goto notify; } goto unlock; } /* Set the default Authenticated Payload Timeout after * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be * sent when the link is active and Encryption is enabled, the conn * type can be either LE or ACL and controller must support LMP Ping. * Ensure for AES-CCM encryption as well. 
*/ if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && test_bit(HCI_CONN_AES_CCM, &conn->flags) && ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { struct hci_cp_write_auth_payload_to cp; cp.handle = cpu_to_le16(conn->handle); cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, sizeof(cp), &cp)) { bt_dev_err(hdev, "write auth payload timeout failed"); goto notify; } goto unlock; } notify: hci_encrypt_cfm(conn, ev->status); unlock: hci_dev_unlock(hdev); } static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_change_link_key_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { if (!ev->status) set_bit(HCI_CONN_SECURE, &conn->flags); clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); hci_key_change_cfm(conn, ev->status); } hci_dev_unlock(hdev); } static void hci_remote_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_features *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (!ev->status) memcpy(conn->features[0], ev->features, 8); if (conn->state != BT_CONFIG) goto unlock; if (!ev->status && lmp_ext_feat_capable(hdev) && lmp_ext_feat_capable(conn)) { struct hci_cp_read_remote_ext_features cp; cp.handle = ev->handle; cp.page = 0x01; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, sizeof(cp), &cp); goto unlock; } if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, conn, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) { cancel_delayed_work(&hdev->cmd_timer); rcu_read_lock(); if (!test_bit(HCI_RESET, &hdev->flags)) { if (ncmd) { cancel_delayed_work(&hdev->ncmd_timer); atomic_set(&hdev->cmd_cnt, 1); } else { if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, HCI_NCMD_TIMEOUT); } } rcu_read_unlock(); } static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_read_buffer_size_v2 *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); hdev->le_pkts = rp->acl_max_pkt; hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); hdev->iso_pkts = rp->iso_max_pkt; hdev->le_cnt = hdev->le_pkts; hdev->iso_cnt = hdev->iso_pkts; BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); return rp->status; } static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status) { struct hci_conn *conn, *tmp; lockdep_assert_held(&hdev->lock); list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) || 
conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig) continue; if (HCI_CONN_HANDLE_UNSET(conn->handle)) hci_conn_failed(conn, status); } } static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_set_cig_params *rp = data; struct hci_cp_le_set_cig_params *cp; struct hci_conn *conn; u8 status = rp->status; bool pending = false; int i; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS); if (!rp->status && (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id)) { bt_dev_err(hdev, "unexpected Set CIG Parameters response data"); status = HCI_ERROR_UNSPECIFIED; } hci_dev_lock(hdev); /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554 * * If the Status return parameter is non-zero, then the state of the CIG * and its CIS configurations shall not be changed by the command. If * the CIG did not already exist, it shall not be created. */ if (status) { /* Keep current configuration, fail only the unbound CIS */ hci_unbound_cis_failed(hdev, rp->cig_id, status); goto unlock; } /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553 * * If the Status return parameter is zero, then the Controller shall * set the Connection_Handle arrayed return parameter to the connection * handle(s) corresponding to the CIS configurations specified in * the CIS_IDs command parameter, in the same order. */ for (i = 0; i < rp->num_handles; ++i) { conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id, cp->cis[i].cis_id); if (!conn || !bacmp(&conn->dst, BDADDR_ANY)) continue; if (conn->state != BT_BOUND && conn->state != BT_CONNECT) continue; if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i]))) continue; if (conn->state == BT_CONNECT) pending = true; } unlock: if (pending) hci_le_create_cis_pending(hdev); hci_dev_unlock(hdev); return rp->status; } static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_rp_le_setup_iso_path *rp = data; struct hci_cp_le_setup_iso_path *cp; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); if (!cp) return rp->status; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); if (!conn) goto unlock; if (rp->status) { hci_connect_cfm(conn, rp->status); hci_conn_del(conn); goto unlock; } switch (cp->direction) { /* Input (Host to Controller) */ case 0x00: /* Only confirm connection if output only */ if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) hci_connect_cfm(conn, rp->status); break; /* Output (Controller to Host) */ case 0x01: /* Confirm connection since conn->iso_qos is always configured * last. 
*/ hci_connect_cfm(conn, rp->status); break; } unlock: hci_dev_unlock(hdev); return rp->status; } static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) { bt_dev_dbg(hdev, "status 0x%2.2x", status); } static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_per_adv_params *cp; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); if (!cp) return rp->status; /* TODO: set the conn state */ return rp->status; } static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_status *rp = data; struct hci_cp_le_set_per_adv_enable *cp; struct adv_info *adv = NULL, *n; u8 per_adv_cnt = 0; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); if (rp->status) return rp->status; cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); if (!cp) return rp->status; hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, cp->handle); if (cp->enable) { hci_dev_set_flag(hdev, HCI_LE_PER_ADV); if (adv) adv->enabled = true; } else { /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_PER_ADV. * The current periodic adv instance will be marked as * disabled once extended advertising is also disabled. */ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->periodic && adv->enabled) per_adv_cnt++; } if (per_adv_cnt > 1) goto unlock; hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); } unlock: hci_dev_unlock(hdev); return rp->status; } #define HCI_CC_VL(_op, _func, _min, _max) \ { \ .op = _op, \ .func = _func, \ .min_len = _min, \ .max_len = _max, \ } #define HCI_CC(_op, _func, _len) \ HCI_CC_VL(_op, _func, _len, _len) #define HCI_CC_STATUS(_op, _func) \ HCI_CC(_op, _func, sizeof(struct hci_ev_status)) static const struct hci_cc { u16 op; u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); u16 min_len; u16 max_len; } hci_cc_table[] = { HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel), HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, sizeof(struct hci_rp_role_discovery)), HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, sizeof(struct hci_rp_read_link_policy)), HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, sizeof(struct hci_rp_write_link_policy)), HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, sizeof(struct hci_rp_read_def_link_policy)), HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, hci_cc_write_def_link_policy), HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, sizeof(struct hci_rp_read_stored_link_key)), HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, sizeof(struct hci_rp_delete_stored_link_key)), HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, sizeof(struct hci_rp_read_local_name)), HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 
sizeof(struct hci_rp_read_class_of_dev)), HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, sizeof(struct hci_rp_read_voice_setting)), HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, sizeof(struct hci_rp_read_num_supported_iac)), HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, sizeof(struct hci_rp_read_auth_payload_to)), HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, sizeof(struct hci_rp_write_auth_payload_to)), HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, sizeof(struct hci_rp_read_local_version)), HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, sizeof(struct hci_rp_read_local_commands)), HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, sizeof(struct hci_rp_read_local_features)), HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, sizeof(struct hci_rp_read_local_ext_features)), HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, sizeof(struct hci_rp_read_buffer_size)), HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, sizeof(struct hci_rp_read_bd_addr)), HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, sizeof(struct hci_rp_read_local_pairing_opts)), HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, sizeof(struct hci_rp_read_page_scan_activity)), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, hci_cc_write_page_scan_activity), HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, sizeof(struct hci_rp_read_page_scan_type)), HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, sizeof(struct hci_rp_read_data_block_size)), HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, sizeof(struct hci_rp_read_flow_control_mode)), HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, sizeof(struct hci_rp_read_local_amp_info)), HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, sizeof(struct hci_rp_read_clock)), HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, sizeof(struct hci_rp_read_enc_key_size)), HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, sizeof(struct hci_rp_read_inq_rsp_tx_power)), HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, hci_cc_read_def_err_data_reporting, sizeof(struct hci_rp_read_def_err_data_reporting)), HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, hci_cc_write_def_err_data_reporting), HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, sizeof(struct hci_rp_pin_code_reply)), HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, sizeof(struct hci_rp_pin_code_neg_reply)), HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, sizeof(struct hci_rp_read_local_oob_data)), HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, sizeof(struct hci_rp_read_local_oob_ext_data)), HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, sizeof(struct hci_rp_le_read_buffer_size)), HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, sizeof(struct hci_rp_le_read_local_features)), HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, sizeof(struct hci_rp_le_read_adv_tx_power)), HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 
sizeof(struct hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, sizeof(struct hci_rp_user_confirm_reply)), HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, hci_cc_le_read_accept_list_size, sizeof(struct hci_rp_le_read_accept_list_size)), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, hci_cc_le_add_to_accept_list), HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, hci_cc_le_del_from_accept_list), HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, sizeof(struct hci_rp_le_read_supported_states)), HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, sizeof(struct hci_rp_le_read_def_data_len)), HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, hci_cc_le_write_def_data_len), HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, hci_cc_le_add_to_resolv_list), HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, hci_cc_le_del_from_resolv_list), HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, hci_cc_le_clear_resolv_list), HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, sizeof(struct hci_rp_le_read_resolv_list_size)), HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, hci_cc_le_set_addr_resolution_enable), HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, sizeof(struct hci_rp_le_read_max_data_len)), HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, hci_cc_write_le_host_supported), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, sizeof(struct hci_rp_read_rssi)), HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, sizeof(struct hci_rp_read_tx_power)), HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, hci_cc_le_set_ext_scan_param), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, hci_cc_le_set_ext_scan_enable), HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, hci_cc_le_read_num_adv_sets, sizeof(struct hci_rp_le_read_num_supported_adv_sets)), HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, sizeof(struct hci_rp_le_set_ext_adv_params)), HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, hci_cc_le_set_ext_adv_enable), HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, hci_cc_le_set_adv_set_random_addr), HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, hci_cc_le_set_per_adv_enable), HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, sizeof(struct hci_rp_le_read_transmit_power)), HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, sizeof(struct hci_rp_le_read_buffer_size_v2)), HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};

static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len; it may still be possible
	 * to partially parse the cc, so leave it to the callback to decide
	 * whether that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}

static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands sent by HCI drivers.
		 * If a vendor doesn't actually follow this convention, we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
			   *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}

#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), }; static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct hci_ev_cmd_status *ev = data; int i; *opcode = __le16_to_cpu(ev->opcode); *status = ev->status; bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { if (hci_cs_table[i].op == *opcode) { hci_cs_table[i].func(hdev, ev->status); break; } } handle_cmd_cnt_and_timer(hdev, ev->ncmd); /* Indicate request completion if the command failed. Also, if * we're not waiting for a special event and we get a success * command status we should try to flag the request as completed * (since for this kind of commands there will not be a command * complete event). */ if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, req_complete_skb); if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", *opcode); return; } } if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) queue_work(hdev->workqueue, &hdev->cmd_work); } static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_hardware_error *ev = data; bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); hdev->hw_error_code = ev->code; queue_work(hdev->req_workqueue, &hdev->error_reset); } static void hci_role_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_role_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { if (!ev->status) conn->role = ev->role; clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); hci_role_switch_cfm(conn, ev->status, ev->role); } hci_dev_unlock(hdev); } static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = data; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, flex_array_size(ev, handles, ev->num))) return; if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); return; } bt_dev_dbg(hdev, "num %d", ev->num); for (i = 0; i < ev->num; i++) { struct hci_comp_pkts_info *info = &ev->handles[i]; struct hci_conn *conn; __u16 handle, count; handle = __le16_to_cpu(info->handle); count = __le16_to_cpu(info->count); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) continue; conn->sent -= count; switch (conn->type) { case ACL_LINK: hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; break; case LE_LINK: if (hdev->le_pkts) { hdev->le_cnt += count; if (hdev->le_cnt > hdev->le_pkts) hdev->le_cnt = hdev->le_pkts; } else { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; } break; case SCO_LINK: hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; break; case ISO_LINK: 
if (hdev->iso_pkts) { hdev->iso_cnt += count; if (hdev->iso_cnt > hdev->iso_pkts) hdev->iso_cnt = hdev->iso_pkts; } else if (hdev->le_pkts) { hdev->le_cnt += count; if (hdev->le_cnt > hdev->le_pkts) hdev->le_cnt = hdev->le_pkts; } else { hdev->acl_cnt += count; if (hdev->acl_cnt > hdev->acl_pkts) hdev->acl_cnt = hdev->acl_pkts; } break; default: bt_dev_err(hdev, "unknown type %d conn %p", conn->type, conn); break; } } queue_work(hdev->workqueue, &hdev->tx_work); } static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, __u16 handle) { struct hci_chan *chan; switch (hdev->dev_type) { case HCI_PRIMARY: return hci_conn_hash_lookup_handle(hdev, handle); case HCI_AMP: chan = hci_chan_lookup_handle(hdev, handle); if (chan) return chan->conn; break; default: bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); break; } return NULL; } static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_num_comp_blocks *ev = data; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, flex_array_size(ev, handles, ev->num_hndl))) return; if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); return; } bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, ev->num_hndl); for (i = 0; i < ev->num_hndl; i++) { struct hci_comp_blocks_info *info = &ev->handles[i]; struct hci_conn *conn = NULL; __u16 handle, block_count; handle = __le16_to_cpu(info->handle); block_count = __le16_to_cpu(info->blocks); conn = __hci_conn_lookup_handle(hdev, handle); if (!conn) continue; conn->sent -= block_count; switch (conn->type) { case ACL_LINK: case AMP_LINK: hdev->block_cnt += block_count; if (hdev->block_cnt > hdev->num_blocks) hdev->block_cnt = hdev->num_blocks; break; default: bt_dev_err(hdev, "unknown type %d conn %p", conn->type, conn); break; } } queue_work(hdev->workqueue, &hdev->tx_work); } static void hci_mode_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_mode_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { conn->mode = ev->mode; if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { if (conn->mode == HCI_CM_ACTIVE) set_bit(HCI_CONN_POWER_SAVE, &conn->flags); else clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); } if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) hci_sco_setup(conn, ev->status); } hci_dev_unlock(hdev); } static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pin_code_req *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; if (conn->state == BT_CONNECTED) { hci_conn_hold(conn); conn->disc_timeout = HCI_PAIRING_TIMEOUT; hci_conn_drop(conn); } if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { u8 secure; if (conn->pending_sec_level == BT_SECURITY_HIGH) secure = 1; else secure = 0; mgmt_pin_code_request(hdev, &ev->bdaddr, secure); } unlock: hci_dev_unlock(hdev); } static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) { if (key_type == HCI_LK_CHANGED_COMBINATION) return; conn->pin_length = pin_len; 
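/* Record the key type and derive the pending security level from it: combination keys map to medium or high security depending on PIN length, authenticated P-192/P-256 keys to high/FIPS. */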
conn->key_type = key_type; switch (key_type) { case HCI_LK_LOCAL_UNIT: case HCI_LK_REMOTE_UNIT: case HCI_LK_DEBUG_COMBINATION: return; case HCI_LK_COMBINATION: if (pin_len == 16) conn->pending_sec_level = BT_SECURITY_HIGH; else conn->pending_sec_level = BT_SECURITY_MEDIUM; break; case HCI_LK_UNAUTH_COMBINATION_P192: case HCI_LK_UNAUTH_COMBINATION_P256: conn->pending_sec_level = BT_SECURITY_MEDIUM; break; case HCI_LK_AUTH_COMBINATION_P192: conn->pending_sec_level = BT_SECURITY_HIGH; break; case HCI_LK_AUTH_COMBINATION_P256: conn->pending_sec_level = BT_SECURITY_FIPS; break; } } static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = data; struct hci_cp_link_key_reply cp; struct hci_conn *conn; struct link_key *key; bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); goto not_found; } bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || key->type == HCI_LK_UNAUTH_COMBINATION_P256) && conn->auth_type != 0xff && (conn->auth_type & 0x01)) { bt_dev_dbg(hdev, "ignoring unauthenticated key"); goto not_found; } if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && (conn->pending_sec_level == BT_SECURITY_HIGH || conn->pending_sec_level == BT_SECURITY_FIPS)) { bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); goto not_found; } conn_set_key(conn, key->type, key->pin_len); } bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); hci_dev_unlock(hdev); return; not_found: hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); hci_dev_unlock(hdev); } static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_link_key_notify *ev = data; struct hci_conn *conn; struct link_key *key; bool persistent; u8 pin_len = 0; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); conn_set_key(conn, ev->key_type, conn->pin_length); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, ev->key_type, pin_len, &persistent); if (!key) goto unlock; /* Update connection information since adding the key will have * fixed up the type in the case of changed combination keys. */ if (ev->key_type == HCI_LK_CHANGED_COMBINATION) conn_set_key(conn, key->type, key->pin_len); mgmt_new_link_key(hdev, key, persistent); /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag * is set. If it's not set simply remove the key from the kernel * list (we've still notified user space about it but with * store_hint being 0). 
*/ if (key->type == HCI_LK_DEBUG_COMBINATION && !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { list_del_rcu(&key->list); kfree_rcu(key, rcu); goto unlock; } if (persistent) clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); else set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); unlock: hci_dev_unlock(hdev); } static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_clock_offset *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) { struct inquiry_entry *ie; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) { ie->data.clock_offset = ev->clock_offset; ie->timestamp = jiffies; } } hci_dev_unlock(hdev); } static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pkt_type_change *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn && !ev->status) conn->pkt_type = __le16_to_cpu(ev->pkt_type); hci_dev_unlock(hdev); } static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_pscan_rep_mode *ev = data; struct inquiry_entry *ie; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) { ie->data.pscan_rep_mode = ev->pscan_rep_mode; ie->timestamp = jiffies; } hci_dev_unlock(hdev); } static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_inquiry_result_rssi *ev = edata; struct inquiry_data data; int i; bt_dev_dbg(hdev, "num_rsp %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); if (skb->len == array_size(ev->num, sizeof(struct inquiry_info_rssi_pscan))) { struct inquiry_info_rssi_pscan *info; for (i = 0; i < ev->num; i++) { u32 flags; info = hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT_WITH_RSSI, sizeof(*info)); if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = info->pscan_mode; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0, 0); } } else if (skb->len == array_size(ev->num, sizeof(struct inquiry_info_rssi))) { struct inquiry_info_rssi *info; for (i = 0; i < ev->num; i++) { u32 flags; info = hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT_WITH_RSSI, sizeof(*info)); if (!info) { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", HCI_EV_INQUIRY_RESULT_WITH_RSSI); goto unlock; } bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x00; flags = hci_inquiry_cache_update(hdev, &data, false); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, NULL, 0, NULL, 0, 0); } } else { bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 
HCI_EV_INQUIRY_RESULT_WITH_RSSI); } unlock: hci_dev_unlock(hdev); } static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_ext_features *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; if (ev->page < HCI_MAX_PAGES) memcpy(conn->features[ev->page], ev->features, 8); if (!ev->status && ev->page == 0x01) { struct inquiry_entry *ie; ie = hci_inquiry_cache_lookup(hdev, &conn->dst); if (ie) ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); if (ev->features[0] & LMP_HOST_SSP) { set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } else { /* It is mandatory by the Bluetooth specification that * Extended Inquiry Results are only used when Secure * Simple Pairing is enabled, but some devices violate * this. * * To make these devices work, the internal SSP * enabled flag needs to be cleared if the remote host * features do not indicate SSP support */ clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } if (ev->features[0] & LMP_HOST_SC) set_bit(HCI_CONN_SC_ENABLED, &conn->flags); } if (conn->state != BT_CONFIG) goto unlock; if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { struct hci_cp_remote_name_req cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.bdaddr, &conn->dst); cp.pscan_rep_mode = 0x02; hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, conn, NULL, 0); if (!hci_outgoing_auth_needed(hdev, conn)) { conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_sync_conn_complete *ev = data; struct hci_conn *conn; u8 status = ev->status; switch (ev->link_type) { case SCO_LINK: case ESCO_LINK: break; default: /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type * for HCI_Synchronous_Connection_Complete is limited to * either SCO or eSCO */ bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); return; } bt_dev_dbg(hdev, "status 0x%2.2x", status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { if (ev->link_type == ESCO_LINK) goto unlock; /* When the link type in the event indicates SCO connection * and lookup of the connection object fails, then check * if an eSCO connection object exists. * * The core limits the synchronous connections to either * SCO or eSCO. The eSCO connection is preferred and tried * to be setup first and until successfully established, * the link type will be hinted as eSCO. */ conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); if (!conn) goto unlock; } /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. 
*/ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); goto unlock; } switch (status) { case 0x00: status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle)); if (status) { conn->state = BT_CLOSED; break; } conn->state = BT_CONNECTED; conn->type = ev->link_type; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); break; case 0x10: /* Connection Accept Timeout */ case 0x0d: /* Connection Rejected due to Limited Resources */ case 0x11: /* Unsupported Feature or Parameter Value */ case 0x1c: /* SCO interval rejected */ case 0x1a: /* Unsupported Remote Feature */ case 0x1e: /* Invalid LMP Parameters */ case 0x1f: /* Unspecified error */ case 0x20: /* Unsupported LMP Parameter value */ if (conn->out) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); if (hci_setup_sync(conn, conn->parent->handle)) goto unlock; } fallthrough; default: conn->state = BT_CLOSED; break; } bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); /* Notify only in case of SCO over HCI transport data path which * is zero and non-zero value shall be non-HCI transport data path */ if (conn->codec.data_path == 0 && hdev->notify) { switch (ev->air_mode) { case 0x02: hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); break; case 0x03: hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); break; } } hci_connect_cfm(conn, status); if (status) hci_conn_del(conn); unlock: hci_dev_unlock(hdev); } static inline size_t eir_get_length(u8 *eir, size_t eir_len) { size_t parsed = 0; while (parsed < eir_len) { u8 field_len = eir[0]; if (field_len == 0) return parsed; parsed += field_len + 1; eir += field_len + 1; } return eir_len; } static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_ext_inquiry_result *ev = edata; struct inquiry_data data; size_t eir_len; int i; if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, flex_array_size(ev, info, ev->num))) return; bt_dev_dbg(hdev, "num %d", ev->num); if (!ev->num) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct extended_inquiry_info *info = &ev->info[i]; u32 flags; bool name_known; bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; data.pscan_period_mode = info->pscan_period_mode; data.pscan_mode = 0x00; memcpy(data.dev_class, info->dev_class, 3); data.clock_offset = info->clock_offset; data.rssi = info->rssi; data.ssp_mode = 0x01; if (hci_dev_test_flag(hdev, HCI_MGMT)) name_known = eir_get_data(info->data, sizeof(info->data), EIR_NAME_COMPLETE, NULL); else name_known = true; flags = hci_inquiry_cache_update(hdev, &data, name_known); eir_len = eir_get_length(info->data, sizeof(info->data)); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, flags, info->data, eir_len, NULL, 0, 0); } hci_dev_unlock(hdev); } static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_key_refresh_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; /* For BR/EDR the necessary steps are taken through the * auth_complete event. 
*/ if (conn->type != LE_LINK) goto unlock; if (!ev->status) conn->sec_level = conn->pending_sec_level; clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); if (ev->status && conn->state == BT_CONNECTED) { hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } if (conn->state == BT_CONFIG) { if (!ev->status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, ev->status); hci_conn_drop(conn); } else { hci_auth_cfm(conn, ev->status); hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(conn); } unlock: hci_dev_unlock(hdev); } static u8 hci_get_auth_req(struct hci_conn *conn) { /* If remote requests no-bonding follow that lead */ if (conn->remote_auth == HCI_AT_NO_BONDING || conn->remote_auth == HCI_AT_NO_BONDING_MITM) return conn->remote_auth | (conn->auth_type & 0x01); /* If both remote and local have enough IO capabilities, require * MITM protection */ if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) return conn->remote_auth | 0x01; /* No MITM protection possible so ignore remote requirement */ return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); } static u8 bredr_oob_data_present(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct oob_data *data; data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); if (!data) return 0x00; if (bredr_sc_enabled(hdev)) { /* When Secure Connections is enabled, then just * return the present value stored with the OOB * data. The stored value contains the right present * information. However it can only be trusted when * not in Secure Connection Only mode. */ if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) return data->present; /* When Secure Connections Only mode is enabled, then * the P-256 values are required. If they are not * available, then do not declare that OOB data is * present. */ if (!memcmp(data->rand256, ZERO_KEY, 16) || !memcmp(data->hash256, ZERO_KEY, 16)) return 0x00; return 0x02; } /* When Secure Connections is not enabled or actually * not supported by the hardware, then check that if * P-192 data values are present. */ if (!memcmp(data->rand192, ZERO_KEY, 16) || !memcmp(data->hash192, ZERO_KEY, 16)) return 0x00; return 0x01; } static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_io_capa_request *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; hci_conn_hold(conn); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; /* Allow pairing if we're pairable, the initiators of the * pairing or if the remote is not requesting bonding. */ if (hci_dev_test_flag(hdev, HCI_BONDABLE) || test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { struct hci_cp_io_capability_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); /* Change the IO capability from KeyboardDisplay * to DisplayYesNo as it is not supported by BT spec. */ cp.capability = (conn->io_capability == 0x04) ? HCI_IO_DISPLAY_YESNO : conn->io_capability; /* If we are initiators, there is no remote information yet */ if (conn->remote_auth == 0xff) { /* Request MITM protection if our IO caps allow it * except for the no-bonding case. 
*/ if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && conn->auth_type != HCI_AT_NO_BONDING) conn->auth_type |= 0x01; } else { conn->auth_type = hci_get_auth_req(conn); } /* If we're not bondable, force one of the non-bondable * authentication requirement values. */ if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) conn->auth_type &= HCI_AT_NO_BONDING_MITM; cp.authentication = conn->auth_type; cp.oob_data = bredr_oob_data_present(conn); hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, sizeof(cp), &cp); } else { struct hci_cp_io_capability_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_io_capa_reply *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; conn->remote_cap = ev->capability; conn->remote_auth = ev->authentication; unlock: hci_dev_unlock(hdev); } static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_confirm_req *ev = data; int loc_mitm, rem_mitm, confirm_hint = 0; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; loc_mitm = (conn->auth_type & 0x01); rem_mitm = (conn->remote_auth & 0x01); /* If we require MITM but the remote device can't provide that * (it has NoInputNoOutput) then reject the confirmation * request. We check the security level here since it doesn't * necessarily match conn->auth_type. */ if (conn->pending_sec_level > BT_SECURITY_MEDIUM && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } /* If no side requires MITM protection; auto-accept */ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with * confirm_hint set to 1). The exception is if neither * side had MITM or if the local IO capability is * NoInputNoOutput, in which case we do auto-accept */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && (loc_mitm || rem_mitm)) { bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; } /* If there already exists link key in local host, leave the * decision to user space since the remote device could be * legitimate or malicious. 
*/ if (hci_find_link_key(hdev, &ev->bdaddr)) { bt_dev_dbg(hdev, "Local host already has link key"); confirm_hint = 1; goto confirm; } BT_DBG("Auto-accept of user confirmation with %ums delay", hdev->auto_accept_delay); if (hdev->auto_accept_delay > 0) { int delay = msecs_to_jiffies(hdev->auto_accept_delay); queue_delayed_work(conn->hdev->workqueue, &conn->auto_accept_work, delay); goto unlock; } hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } confirm: mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, le32_to_cpu(ev->passkey), confirm_hint); unlock: hci_dev_unlock(hdev); } static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_passkey_req *ev = data; bt_dev_dbg(hdev, ""); if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); } static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_user_passkey_notify *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) return; conn->passkey_notify = __le32_to_cpu(ev->passkey); conn->passkey_entered = 0; if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); } static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_keypress_notify *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) return; switch (ev->type) { case HCI_KEYPRESS_STARTED: conn->passkey_entered = 0; return; case HCI_KEYPRESS_ENTERED: conn->passkey_entered++; break; case HCI_KEYPRESS_ERASED: conn->passkey_entered--; break; case HCI_KEYPRESS_CLEARED: conn->passkey_entered = 0; break; case HCI_KEYPRESS_COMPLETED: return; } if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); } static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) goto unlock; /* Reset the authentication requirement to unknown */ conn->remote_auth = 0xff; /* To avoid duplicate auth_failed events to user space we check * the HCI_CONN_AUTH_PEND flag which will be set if we * initiated the authentication. 
A traditional auth_complete * event gets always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) mgmt_auth_failed(conn, ev->status); hci_conn_drop(conn); unlock: hci_dev_unlock(hdev); } static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_remote_host_features *ev = data; struct inquiry_entry *ie; struct hci_conn *conn; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) memcpy(conn->features[1], ev->features, 8); ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); if (ie) ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); hci_dev_unlock(hdev); } static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, struct sk_buff *skb) { struct hci_ev_remote_oob_data_request *ev = edata; struct oob_data *data; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); if (!data) { struct hci_cp_remote_oob_data_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), &cp); goto unlock; } if (bredr_sc_enabled(hdev)) { struct hci_cp_remote_oob_ext_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { memset(cp.hash192, 0, sizeof(cp.hash192)); memset(cp.rand192, 0, sizeof(cp.rand192)); } else { memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); } memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, sizeof(cp), &cp); } else { struct hci_cp_remote_oob_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); memcpy(cp.hash, data->hash192, sizeof(cp.hash)); memcpy(cp.rand, data->rand192, sizeof(cp.rand)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } #if IS_ENABLED(CONFIG_BT_HS) static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_channel_selected *ev = data; struct hci_conn *hcon; bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (!hcon) return; amp_read_loc_assoc_final_data(hdev, hcon); } static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_phy_link_complete *ev = data; struct hci_conn *hcon, *bredr_hcon; bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, ev->status); hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (!hcon) goto unlock; if (!hcon->amp_mgr) goto unlock; if (ev->status) { hci_conn_del(hcon); goto unlock; } bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; hcon->state = BT_CONNECTED; bacpy(&hcon->dst, &bredr_hcon->dst); hci_conn_hold(hcon); hcon->disc_timeout = HCI_DISCONN_TIMEOUT; hci_conn_drop(hcon); hci_debugfs_create_conn(hcon); hci_conn_add_sysfs(hcon); amp_physical_cfm(bredr_hcon, hcon); unlock: hci_dev_unlock(hdev); } static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_logical_link_complete *ev = data; struct hci_conn *hcon; struct hci_chan *hchan; struct amp_mgr *mgr; bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 
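/* The logical link is created on top of an AMP physical link, so look up the physical link connection by its phy_handle first. */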
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (!hcon) return; /* Create AMP hchan */ hchan = hci_chan_create(hcon); if (!hchan) return; hchan->handle = le16_to_cpu(ev->handle); hchan->amp = true; BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); mgr = hcon->amp_mgr; if (mgr && mgr->bredr_chan) { struct l2cap_chan *bredr_chan = mgr->bredr_chan; l2cap_chan_lock(bredr_chan); bredr_chan->conn->mtu = hdev->block_mtu; l2cap_logical_cfm(bredr_chan, hchan, 0); hci_conn_hold(hcon); l2cap_chan_unlock(bredr_chan); } } static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_disconn_logical_link_complete *ev = data; struct hci_chan *hchan; bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", le16_to_cpu(ev->handle), ev->status); if (ev->status) return; hci_dev_lock(hdev); hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); if (!hchan || !hchan->amp) goto unlock; amp_destroy_logical_link(hchan, ev->reason); unlock: hci_dev_unlock(hdev); } static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_disconn_phy_link_complete *ev = data; struct hci_conn *hcon; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); if (hcon && hcon->type == AMP_LINK) { hcon->state = BT_CLOSED; hci_disconn_cfm(hcon, ev->reason); hci_conn_del(hcon); } hci_dev_unlock(hdev); } #endif static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa) { if (conn->out) { conn->dst_type = bdaddr_type; conn->resp_addr_type = bdaddr_type; bacpy(&conn->resp_addr, bdaddr); /* Check if the controller has set a Local RPA; if so it must be * used instead of hdev->rpa. */ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, local_rpa); } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &conn->hdev->rpa); } else { hci_copy_identity_address(conn->hdev, &conn->init_addr, &conn->init_addr_type); } } else { conn->resp_addr_type = conn->hdev->adv_addr_type; /* Check if the controller has set a Local RPA; if so it must be * used instead of hdev->rpa. */ if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { conn->resp_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->resp_addr, local_rpa); } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { /* In case of ext adv, resp_addr will be updated in * Adv Terminated event. */ if (!ext_adv_capable(conn->hdev)) bacpy(&conn->resp_addr, &conn->hdev->random_addr); } else { bacpy(&conn->resp_addr, &conn->hdev->bdaddr); } conn->init_addr_type = bdaddr_type; bacpy(&conn->init_addr, bdaddr); /* For incoming connections, set the default minimum * and maximum connection interval. They will be used * to check if the parameters are in range and if not * trigger the connection update procedure.
*/ conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; } } static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa, u8 role, u16 handle, u16 interval, u16 latency, u16 supervision_timeout) { struct hci_conn_params *params; struct hci_conn *conn; struct smp_irk *irk; u8 addr_type; hci_dev_lock(hdev); /* All controllers implicitly stop advertising in the event of a * connection, so ensure that the state bit is cleared. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); if (!conn) { /* In case of error status and there is no connection pending * just unlock as there is nothing to cleanup. */ if (status) goto unlock; conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); if (!conn) { bt_dev_err(hdev, "no memory for new connection"); goto unlock; } conn->dst_type = bdaddr_type; /* If we didn't have a hci_conn object previously * but we're in central role this must be something * initiated using an accept list. Since accept list based * connections are not "first class citizens" we don't * have full tracking of them. Therefore, we go ahead * with a "best effort" approach of determining the * initiator address based on the HCI_PRIVACY flag. */ if (conn->out) { conn->resp_addr_type = bdaddr_type; bacpy(&conn->resp_addr, bdaddr); if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &hdev->rpa); } else { hci_copy_identity_address(hdev, &conn->init_addr, &conn->init_addr_type); } } } else { cancel_delayed_work(&conn->le_conn_timeout); } /* The HCI_LE_Connection_Complete event is only sent once per connection. * Processing it more than once per connection can corrupt kernel memory. * * As the connection handle is set here for the first time, it indicates * whether the connection is already set up. */ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) { bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); goto unlock; } le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); /* Lookup the identity address from the stored connection * address and address type. * * When establishing connections to an identity address, the * connection procedure will store the resolvable random * address first. Now if it can be converted back into the * identity address, start using the identity address from * now on. */ irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); if (irk) { bacpy(&conn->dst, &irk->bdaddr); conn->dst_type = irk->addr_type; } conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); if (handle > HCI_CONN_HANDLE_MAX) { bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, HCI_CONN_HANDLE_MAX); status = HCI_ERROR_INVALID_PARAMETERS; } /* All connection failure handling is taken care of by the * hci_conn_failed function which is triggered by the HCI * request completion callbacks used for connecting. 
*/ if (status) goto unlock; /* Drop the connection if it has been aborted */ if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { hci_conn_drop(conn); goto unlock; } if (conn->dst_type == ADDR_LE_DEV_PUBLIC) addr_type = BDADDR_LE_PUBLIC; else addr_type = BDADDR_LE_RANDOM; /* Drop the connection if the device is blocked */ if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { hci_conn_drop(conn); goto unlock; } if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, conn, NULL, 0); conn->sec_level = BT_SECURITY_LOW; conn->handle = handle; conn->state = BT_CONFIG; /* Store current advertising instance as connection advertising instance * when software rotation is in use so it can be re-enabled when * disconnected. */ if (!ext_adv_capable(hdev)) conn->adv_instance = hdev->cur_adv_instance; conn->le_conn_interval = interval; conn->le_conn_latency = latency; conn->le_supv_timeout = supervision_timeout; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); /* The remote features procedure is defined for central * role only. So only in case of an initiated connection * request the remote features. * * If the local controller supports peripheral-initiated features * exchange, then requesting the remote features in peripheral * role is possible. Otherwise just transition into the * connected state without requesting the remote features. */ if (conn->out || (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { struct hci_cp_le_read_remote_features cp; cp.handle = __cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, sizeof(cp), &cp); hci_conn_hold(conn); } else { conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); } params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, conn->dst_type); if (params) { hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); params->conn = NULL; } } unlock: hci_update_passive_scan(hdev); hci_dev_unlock(hdev); } static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_conn_complete *ev = data; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, NULL, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); } static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_enh_conn_complete *ev = data; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); } static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_ext_adv_set_term *ev = data; struct hci_conn *conn; struct adv_info *adv, *n; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); /* The Bluetooth Core 5.3 specification clearly states that this event * shall not be sent when the Host disables the advertising set. So in * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. * * When the Host disables an advertising set, all cleanup is done via * its command callback and does not need to be duplicated here.
*/ if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); return; } hci_dev_lock(hdev); adv = hci_find_adv_instance(hdev, ev->handle); if (ev->status) { if (!adv) goto unlock; /* Remove advertising as it has been terminated */ hci_remove_adv_instance(hdev, ev->handle); mgmt_advertising_removed(NULL, hdev, ev->handle); list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { if (adv->enabled) goto unlock; } /* We are no longer advertising, clear HCI_LE_ADV */ hci_dev_clear_flag(hdev, HCI_LE_ADV); goto unlock; } if (adv) adv->enabled = false; conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); if (conn) { /* Store handle in the connection so the correct advertising * instance can be re-enabled when disconnected. */ conn->adv_instance = ev->handle; if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || bacmp(&conn->resp_addr, BDADDR_ANY)) goto unlock; if (!ev->handle) { bacpy(&conn->resp_addr, &hdev->random_addr); goto unlock; } if (adv) bacpy(&conn->resp_addr, &adv->random_addr); } unlock: hci_dev_unlock(hdev); } static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_conn_update_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { conn->le_conn_interval = le16_to_cpu(ev->interval); conn->le_conn_latency = le16_to_cpu(ev->latency); conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); } hci_dev_unlock(hdev); } /* This function requires that the caller holds hdev->lock */ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, bool addr_resolved, u8 adv_type) { struct hci_conn *conn; struct hci_conn_params *params; /* If the event is not connectable don't proceed further */ if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) return NULL; /* Ignore if the device is blocked or hdev is suspended */ if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || hdev->suspended) return NULL; /* Most controllers will fail if we try to create new connections * while we have an existing one in peripheral role. */ if (hdev->conn_hash.le_num_peripheral > 0 && (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || !(hdev->le_states[3] & 0x10))) return NULL; /* If we're not connectable only connect devices that we have in * our pend_le_conns list. */ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type); if (!params) return NULL; if (!params->explicit_connect) { switch (params->auto_connect) { case HCI_AUTO_CONN_DIRECT: /* Only devices advertising with ADV_DIRECT_IND * trigger a connection attempt. This allows * incoming connections from peripheral devices. */ if (adv_type != LE_ADV_DIRECT_IND) return NULL; break; case HCI_AUTO_CONN_ALWAYS: /* Devices advertising with ADV_IND or ADV_DIRECT_IND * trigger a connection attempt. This means * that incoming connections from peripheral devices are * accepted and outgoing connections to peripheral * devices are established when found.
*/ break; default: return NULL; } } conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned * by higher layer that tried to connect, if no then * store the pointer since we don't really have any * other owner of the object besides the params that * triggered it. This way we can abort the connection if * the parameters get removed and keep the reference * count consistent once the connection is established. */ if (!params->explicit_connect) params->conn = hci_conn_get(conn); return conn; } switch (PTR_ERR(conn)) { case -EBUSY: /* If hci_connect() returns -EBUSY it means there is already * an LE connection attempt going on. Since controllers don't * support more than one connection attempt at the time, we * don't consider this an error case. */ break; default: BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); return NULL; } return NULL; } static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, u8 direct_addr_type, s8 rssi, u8 *data, u8 len, bool ext_adv, bool ctl_time, u64 instant) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; struct hci_conn *conn; bool match, bdaddr_resolved; u32 flags; u8 *ptr; switch (type) { case LE_ADV_IND: case LE_ADV_DIRECT_IND: case LE_ADV_SCAN_IND: case LE_ADV_NONCONN_IND: case LE_ADV_SCAN_RSP: break; default: bt_dev_err_ratelimited(hdev, "unknown advertising packet " "type: 0x%02x", type); return; } if (len > max_adv_len(hdev)) { bt_dev_err_ratelimited(hdev, "adv larger than maximum supported"); return; } /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * * When data is NULL, len is 0 so there is no need for extra ptr * check as 'ptr < data + 0' is already false in such case. */ for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { if (ptr + 1 + *ptr > data + len) break; } /* Adjust for actual length. This handles the case when remote * device is advertising with incorrect data length. */ len = ptr - data; /* If the direct address is present, then this report is from * a LE Direct Advertising Report event. In that case it is * important to see if the address is matching the local * controller address. */ if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, &bdaddr_resolved); /* Only resolvable random addresses are valid for these * kind of reports and others can be ignored. */ if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) return; /* If the controller is not using resolvable random * addresses, then this report can be ignored. */ if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return; /* If the local IRK of the controller does not match * with the resolvable random address provided, then * this report can be ignored. */ if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) return; } /* Check if we need to convert to identity address */ irk = hci_get_irk(hdev, bdaddr, bdaddr_type); if (irk) { bdaddr = &irk->bdaddr; bdaddr_type = irk->addr_type; } bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); /* Check if we have been requested to connect to this device. * * direct_addr is set only for directed advertising reports (it is NULL * for advertising reports) and is already verified to be RPA above. 
*/ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, type); if (!ext_adv && conn && type == LE_ADV_IND && len <= max_adv_len(hdev)) { /* Store report for later inclusion by * mgmt_device_connected */ memcpy(conn->le_adv_data, data, len); conn->le_adv_data_len = len; } if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; else flags = 0; /* All scan results should be sent up for Mesh systems */ if (hci_dev_test_flag(hdev, HCI_MESH)) { mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, instant); return; } /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send * device found events, or advertisement monitoring requested. */ if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (type == LE_ADV_DIRECT_IND) return; if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, bdaddr, bdaddr_type) && idr_is_empty(&hdev->adv_monitors_idr)) return; mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* When receiving a scan response, then there is no way to * know if the remote device is connectable or not. However * since scan responses are merged with a previously seen * advertising report, the flags field from that report * will be used. * * In the unlikely case that a controller just sends a scan * response event that doesn't match the pending report, then * it is marked as a standalone SCAN_RSP. */ if (type == LE_ADV_SCAN_RSP) flags = MGMT_DEV_FOUND_SCAN_RSP; /* If there's nothing pending either store the data from this * event or send an immediate device found event if the data * should not be stored for later. */ if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; } mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* Check if the pending report is for the same device as the new one */ match = (!bacmp(bdaddr, &d->last_adv_addr) && bdaddr_type == d->last_adv_addr_type); /* If the pending data doesn't match this report or this isn't a * scan response (e.g. we got a duplicate ADV_IND) then force * sending of the pending data. */ if (type != LE_ADV_SCAN_RSP || !match) { /* Send out whatever is in the cache, but skip duplicates */ if (!match) mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, d->last_adv_rssi, d->last_adv_flags, d->last_adv_data, d->last_adv_data_len, NULL, 0, 0); /* If the new report will trigger a SCAN_REQ store it for * later merging. */ if (!ext_adv && (type == LE_ADV_IND || type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; } /* The advertising reports cannot be merged, so clear * the pending report and send out a device found event. */ clear_pending_adv_report(hdev); mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, rssi, flags, data, len, NULL, 0, 0); return; } /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and * the new event is a SCAN_RSP. We can therefore proceed with * sending a merged device found event. 
*/ mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, d->last_adv_data, d->last_adv_data_len, data, len, 0); clear_pending_adv_report(hdev); } static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_advertising_report *ev = data; u64 instant = jiffies; if (!ev->num) return; hci_dev_lock(hdev); while (ev->num--) { struct hci_ev_le_advertising_info *info; s8 rssi; info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, sizeof(*info)); if (!info) break; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, info->length + 1)) break; if (info->length <= max_adv_len(hdev)) { rssi = info->data[info->length]; process_adv_report(hdev, info->type, &info->bdaddr, info->bdaddr_type, NULL, 0, rssi, info->data, info->length, false, false, instant); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } } hci_dev_unlock(hdev); } static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) { if (evt_type & LE_EXT_ADV_LEGACY_PDU) { switch (evt_type) { case LE_LEGACY_ADV_IND: return LE_ADV_IND; case LE_LEGACY_ADV_DIRECT_IND: return LE_ADV_DIRECT_IND; case LE_LEGACY_ADV_SCAN_IND: return LE_ADV_SCAN_IND; case LE_LEGACY_NONCONN_IND: return LE_ADV_NONCONN_IND; case LE_LEGACY_SCAN_RSP_ADV: case LE_LEGACY_SCAN_RSP_ADV_SCAN: return LE_ADV_SCAN_RSP; } goto invalid; } if (evt_type & LE_EXT_ADV_CONN_IND) { if (evt_type & LE_EXT_ADV_DIRECT_IND) return LE_ADV_DIRECT_IND; return LE_ADV_IND; } if (evt_type & LE_EXT_ADV_SCAN_RSP) return LE_ADV_SCAN_RSP; if (evt_type & LE_EXT_ADV_SCAN_IND) return LE_ADV_SCAN_IND; if (evt_type == LE_EXT_ADV_NON_CONN_IND || evt_type & LE_EXT_ADV_DIRECT_IND) return LE_ADV_NONCONN_IND; invalid: bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", evt_type); return LE_ADV_INVALID; } static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_ext_adv_report *ev = data; u64 instant = jiffies; if (!ev->num) return; hci_dev_lock(hdev); while (ev->num--) { struct hci_ev_le_ext_adv_info *info; u8 legacy_evt_type; u16 evt_type; info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, sizeof(*info)); if (!info) break; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, info->length)) break; evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK; legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &info->bdaddr, info->bdaddr_type, NULL, 0, info->rssi, info->data, info->length, !(evt_type & LE_EXT_ADV_LEGACY_PDU), false, instant); } } hci_dev_unlock(hdev); } static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_pa_term_sync cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); } static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_pa_sync_established *ev = data; int mask = hdev->link_mode; __u8 flags = 0; struct hci_conn *bis; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); hci_dev_clear_flag(hdev, HCI_PA_SYNC); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_pa_term_sync(hdev, ev->handle); goto unlock; } if (!(flags & HCI_PROTO_DEFER)) goto unlock; /* Add connection to indicate the PA sync event */ bis = hci_conn_add(hdev, ISO_LINK, 
BDADDR_ANY, HCI_ROLE_SLAVE); if (!bis) goto unlock; if (ev->status) set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags); else set_bit(HCI_CONN_PA_SYNC, &bis->flags); /* Notify connection to iso layer */ hci_connect_cfm(bis, ev->status); unlock: hci_dev_unlock(hdev); } static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_per_adv_report *ev = data; int mask = hdev->link_mode; __u8 flags = 0; bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); hci_dev_lock(hdev); mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) hci_le_pa_term_sync(hdev, ev->sync_handle); hci_dev_unlock(hdev); } static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_remote_feat_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn) { if (!ev->status) memcpy(conn->features[0], ev->features, 8); if (conn->state == BT_CONFIG) { __u8 status; /* If the local controller supports peripheral-initiated * features exchange, but the remote controller does * not, then it is possible that the error code 0x1a * for unsupported remote feature gets returned. * * In this specific case, allow the connection to * transition into connected state and mark it as * successful. */ if (!conn->out && ev->status == 0x1a && (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) status = 0x00; else status = ev->status; conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); hci_conn_drop(conn); } } hci_dev_unlock(hdev); } static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_ltk_req *ev = data; struct hci_cp_le_ltk_reply cp; struct hci_cp_le_ltk_neg_reply neg; struct hci_conn *conn; struct smp_ltk *ltk; bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (conn == NULL) goto not_found; ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); if (!ltk) goto not_found; if (smp_ltk_is_sc(ltk)) { /* With SC both EDiv and Rand are set to zero */ if (ev->ediv || ev->rand) goto not_found; } else { /* For non-SC keys check that EDiv and Rand match */ if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) goto not_found; } memcpy(cp.ltk, ltk->val, ltk->enc_size); memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); cp.handle = cpu_to_le16(conn->handle); conn->pending_sec_level = smp_ltk_sec_level(ltk); conn->enc_key_size = ltk->enc_size; hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a * temporary key used to encrypt a connection following * pairing. It is used during the Encrypted Session Setup to * distribute the keys. Later, security can be re-established * using a distributed LTK. 
*/ if (ltk->type == SMP_STK) { set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); list_del_rcu(&ltk->list); kfree_rcu(ltk, rcu); } else { clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); } hci_dev_unlock(hdev); return; not_found: neg.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); hci_dev_unlock(hdev); } static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, u8 reason) { struct hci_cp_le_conn_param_req_neg_reply cp; cp.handle = cpu_to_le16(handle); cp.reason = reason; hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), &cp); } static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_remote_conn_param_req *ev = data; struct hci_cp_le_conn_param_req_reply cp; struct hci_conn *hcon; u16 handle, min, max, latency, timeout; bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); handle = le16_to_cpu(ev->handle); min = le16_to_cpu(ev->interval_min); max = le16_to_cpu(ev->interval_max); latency = le16_to_cpu(ev->latency); timeout = le16_to_cpu(ev->timeout); hcon = hci_conn_hash_lookup_handle(hdev, handle); if (!hcon || hcon->state != BT_CONNECTED) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_UNKNOWN_CONN_ID); if (hci_check_conn_params(min, max, latency, timeout)) return send_conn_param_neg_reply(hdev, handle, HCI_ERROR_INVALID_LL_PARAMS); if (hcon->role == HCI_ROLE_MASTER) { struct hci_conn_params *params; u8 store_hint; hci_dev_lock(hdev); params = hci_conn_params_lookup(hdev, &hcon->dst, hcon->dst_type); if (params) { params->conn_min_interval = min; params->conn_max_interval = max; params->conn_latency = latency; params->supervision_timeout = timeout; store_hint = 0x01; } else { store_hint = 0x00; } hci_dev_unlock(hdev); mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, store_hint, min, max, latency, timeout); } cp.handle = ev->handle; cp.interval_min = ev->interval_min; cp.interval_max = ev->interval_max; cp.latency = ev->latency; cp.timeout = ev->timeout; cp.min_ce_len = 0; cp.max_ce_len = 0; hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); } static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_direct_adv_report *ev = data; u64 instant = jiffies; int i; if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, flex_array_size(ev, info, ev->num))) return; if (!ev->num) return; hci_dev_lock(hdev); for (i = 0; i < ev->num; i++) { struct hci_ev_le_direct_adv_info *info = &ev->info[i]; process_adv_report(hdev, info->type, &info->bdaddr, info->bdaddr_type, &info->direct_addr, info->direct_addr_type, info->rssi, NULL, 0, false, false, instant); } hci_dev_unlock(hdev); } static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_ev_le_phy_update_complete *ev = data; struct hci_conn *conn; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (ev->status) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); if (!conn) goto unlock; conn->le_tx_phy = ev->tx_phy; conn->le_rx_phy = ev->rx_phy; unlock: hci_dev_unlock(hdev); } static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_cis_established *ev = data; struct hci_conn *conn; struct bt_iso_qos *qos; bool pending = false; u16 handle = __le16_to_cpu(ev->handle); bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); if 
(!conn) { bt_dev_err(hdev, "Unable to find connection with handle 0x%4.4x", handle); goto unlock; } if (conn->type != ISO_LINK) { bt_dev_err(hdev, "Invalid connection link type handle 0x%4.4x", handle); goto unlock; } qos = &conn->iso_qos; pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags); /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */ qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250; qos->ucast.out.interval = qos->ucast.in.interval; switch (conn->role) { case HCI_ROLE_SLAVE: /* Convert Transport Latency (us) to Latency (msec) */ qos->ucast.in.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 1000); qos->ucast.out.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu); qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu); qos->ucast.in.phy = ev->c_phy; qos->ucast.out.phy = ev->p_phy; break; case HCI_ROLE_MASTER: /* Convert Transport Latency (us) to Latency (msec) */ qos->ucast.out.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency), 1000); qos->ucast.in.latency = DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency), 1000); qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu); qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu); qos->ucast.out.phy = ev->c_phy; qos->ucast.in.phy = ev->p_phy; break; } if (!ev->status) { conn->state = BT_CONNECTED; hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); hci_iso_setup_path(conn); goto unlock; } conn->state = BT_CLOSED; hci_connect_cfm(conn, ev->status); hci_conn_del(conn); unlock: if (pending) hci_le_create_cis_pending(hdev); hci_dev_unlock(hdev); } static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_reject_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); } static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) { struct hci_cp_le_accept_cis cp; memset(&cp, 0, sizeof(cp)); cp.handle = handle; hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); } static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_cis_req *ev = data; u16 acl_handle, cis_handle; struct hci_conn *acl, *cis; int mask; __u8 flags = 0; acl_handle = __le16_to_cpu(ev->acl_handle); cis_handle = __le16_to_cpu(ev->cis_handle); bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", acl_handle, cis_handle, ev->cig_id, ev->cis_id); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, acl_handle); if (!acl) goto unlock; mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } cis = hci_conn_hash_lookup_handle(hdev, cis_handle); if (!cis) { cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); if (!cis) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } cis->handle = cis_handle; } cis->iso_qos.ucast.cig = ev->cig_id; cis->iso_qos.ucast.cis = ev->cis_id; if (!(flags & HCI_PROTO_DEFER)) { hci_le_accept_cis(hdev, ev->cis_handle); } else { cis->state = BT_CONNECT2; hci_connect_cfm(cis, 0); } unlock: hci_dev_unlock(hdev); } static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_create_big_complete *ev = data; struct hci_conn *conn; __u8 i = 0; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, flex_array_size(ev, bis_handle, ev->num_bis))) 
return; hci_dev_lock(hdev); rcu_read_lock(); /* Connect all BISes that are bound to the BIG */ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { if (bacmp(&conn->dst, BDADDR_ANY) || conn->type != ISO_LINK || conn->iso_qos.bcast.big != ev->handle) continue; if (hci_conn_set_handle(conn, __le16_to_cpu(ev->bis_handle[i++]))) continue; if (!ev->status) { conn->state = BT_CONNECTED; set_bit(HCI_CONN_BIG_CREATED, &conn->flags); rcu_read_unlock(); hci_debugfs_create_conn(conn); hci_conn_add_sysfs(conn); hci_iso_setup_path(conn); rcu_read_lock(); continue; } hci_connect_cfm(conn, ev->status); rcu_read_unlock(); hci_conn_del(conn); rcu_read_lock(); } if (!ev->status && !i) /* If no BISes have been connected for the BIG, * terminate. This is in case all bound connections * have been closed before the BIG creation * has completed. */ hci_le_terminate_big_sync(hdev, ev->handle, HCI_ERROR_LOCAL_HOST_TERM); rcu_read_unlock(); hci_dev_unlock(hdev); } static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_big_sync_estabilished *ev = data; struct hci_conn *bis; struct hci_conn *pa_sync; int i; bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, flex_array_size(ev, bis, ev->num_bis))) return; hci_dev_lock(hdev); if (!ev->status) { pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle); if (pa_sync) /* Also mark the BIG sync established event on the * associated PA sync hcon */ set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags); } for (i = 0; i < ev->num_bis; i++) { u16 handle = le16_to_cpu(ev->bis[i]); __le32 interval; bis = hci_conn_hash_lookup_handle(hdev, handle); if (!bis) { bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, HCI_ROLE_SLAVE); if (!bis) continue; bis->handle = handle; } if (ev->status != 0x42) /* Mark PA sync as established */ set_bit(HCI_CONN_PA_SYNC, &bis->flags); bis->iso_qos.bcast.big = ev->handle; memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->latency, sizeof(ev->latency)); bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); if (!ev->status) { set_bit(HCI_CONN_BIG_SYNC, &bis->flags); hci_iso_setup_path(bis); } } /* In case BIG sync failed, notify each failed connection to * the user after all hci connections have been added */ if (ev->status) for (i = 0; i < ev->num_bis; i++) { u16 handle = le16_to_cpu(ev->bis[i]); bis = hci_conn_hash_lookup_handle(hdev, handle); set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags); hci_connect_cfm(bis, ev->status); } hci_dev_unlock(hdev); } static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) { struct hci_evt_le_big_info_adv_report *ev = data; int mask = hdev->link_mode; __u8 flags = 0; bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle)); hci_dev_lock(hdev); mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags); if (!(mask & HCI_LM_ACCEPT)) hci_le_pa_term_sync(hdev, ev->sync_handle); hci_dev_unlock(hdev); } #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ [_op] = { \ .func = _func, \ .min_len = _min_len, \ .max_len = _max_len, \ } #define HCI_LE_EV(_op, _func, _len) \ HCI_LE_EV_VL(_op, _func, _len, _len) #define HCI_LE_EV_STATUS(_op, _func) \ HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) /* Entries in this table shall have their position 
according to the subevent * opcode they handle so the use of the macros above is recommend since it does * attempt to initialize at its proper index using Designated Initializers that * way events without a callback function can be ommited. */ static const struct hci_le_ev { void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); u16 min_len; u16 max_len; } hci_le_ev_table[U8_MAX + 1] = { /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, sizeof(struct hci_ev_le_conn_complete)), /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, sizeof(struct hci_ev_le_advertising_report), HCI_MAX_EVENT_SIZE), /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, hci_le_conn_update_complete_evt, sizeof(struct hci_ev_le_conn_update_complete)), /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, hci_le_remote_feat_complete_evt, sizeof(struct hci_ev_le_remote_feat_complete)), /* [0x05 = HCI_EV_LE_LTK_REQ] */ HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, sizeof(struct hci_ev_le_ltk_req)), /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, hci_le_remote_conn_param_req_evt, sizeof(struct hci_ev_le_remote_conn_param_req)), /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, hci_le_enh_conn_complete_evt, sizeof(struct hci_ev_le_enh_conn_complete)), /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, sizeof(struct hci_ev_le_direct_adv_report), HCI_MAX_EVENT_SIZE), /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, sizeof(struct hci_ev_le_phy_update_complete)), /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, sizeof(struct hci_ev_le_ext_adv_report), HCI_MAX_EVENT_SIZE), /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, hci_le_pa_sync_estabilished_evt, sizeof(struct hci_ev_le_pa_sync_established)), /* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */ HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT, hci_le_per_adv_report_evt, sizeof(struct hci_ev_le_per_adv_report), HCI_MAX_EVENT_SIZE), /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, sizeof(struct hci_evt_le_ext_adv_set_term)), /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, sizeof(struct hci_evt_le_cis_established)), /* [0x1a = HCI_EVT_LE_CIS_REQ] */ HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, sizeof(struct hci_evt_le_cis_req)), /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, hci_le_create_big_complete_evt, sizeof(struct hci_evt_le_create_big_complete), HCI_MAX_EVENT_SIZE), /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, hci_le_big_sync_established_evt, sizeof(struct hci_evt_le_big_sync_estabilished), HCI_MAX_EVENT_SIZE), /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, hci_le_big_info_adv_report_evt, sizeof(struct hci_evt_le_big_info_adv_report), HCI_MAX_EVENT_SIZE), }; static void hci_le_meta_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { 
struct hci_ev_le_meta *ev = data; const struct hci_le_ev *subev; bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); /* Only match event if command OGF is for LE */ if (hdev->sent_cmd && hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && hci_skb_event(hdev->sent_cmd) == ev->subevent) { *opcode = hci_skb_opcode(hdev->sent_cmd); hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, req_complete_skb); } subev = &hci_le_ev_table[ev->subevent]; if (!subev->func) return; if (skb->len < subev->min_len) { bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", ev->subevent, skb->len, subev->min_len); return; } /* Just warn if the length is over max_len size it still be * possible to partially parse the event so leave to callback to * decide if that is acceptable. */ if (skb->len > subev->max_len) bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", ev->subevent, skb->len, subev->max_len); data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); if (!data) return; subev->func(hdev, data, skb); } static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event, struct sk_buff *skb) { struct hci_ev_cmd_complete *ev; struct hci_event_hdr *hdr; if (!skb) return false; hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); if (!hdr) return false; if (event) { if (hdr->evt != event) return false; return true; } /* Check if request ended in Command Status - no way to retrieve * any extra parameters in this case. */ if (hdr->evt == HCI_EV_CMD_STATUS) return false; if (hdr->evt != HCI_EV_CMD_COMPLETE) { bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", hdr->evt); return false; } ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); if (!ev) return false; if (opcode != __le16_to_cpu(ev->opcode)) { BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, __le16_to_cpu(ev->opcode)); return false; } return true; } static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, struct sk_buff *skb) { struct hci_ev_le_advertising_info *adv; struct hci_ev_le_direct_adv_info *direct_adv; struct hci_ev_le_ext_adv_info *ext_adv; const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; const struct hci_ev_conn_request *conn_request = (void *)skb->data; hci_dev_lock(hdev); /* If we are currently suspended and this is the first BT event seen, * save the wake reason associated with the event. */ if (!hdev->suspended || hdev->wake_reason) goto unlock; /* Default to remote wake. Values for wake_reason are documented in the * Bluez mgmt api docs. */ hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE; /* Once configured for remote wakeup, we should only wake up for * reconnections. It's useful to see which device is waking us up so * keep track of the bdaddr of the connection event that woke us up. 
*/ if (event == HCI_EV_CONN_REQUEST) { bacpy(&hdev->wake_addr, &conn_complete->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_CONN_COMPLETE) { bacpy(&hdev->wake_addr, &conn_request->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_LE_META) { struct hci_ev_le_meta *le_ev = (void *)skb->data; u8 subevent = le_ev->subevent; u8 *ptr = &skb->data[sizeof(*le_ev)]; u8 num_reports = *ptr; if ((subevent == HCI_EV_LE_ADVERTISING_REPORT || subevent == HCI_EV_LE_DIRECT_ADV_REPORT || subevent == HCI_EV_LE_EXT_ADV_REPORT) && num_reports) { adv = (void *)(ptr + 1); direct_adv = (void *)(ptr + 1); ext_adv = (void *)(ptr + 1); switch (subevent) { case HCI_EV_LE_ADVERTISING_REPORT: bacpy(&hdev->wake_addr, &adv->bdaddr); hdev->wake_addr_type = adv->bdaddr_type; break; case HCI_EV_LE_DIRECT_ADV_REPORT: bacpy(&hdev->wake_addr, &direct_adv->bdaddr); hdev->wake_addr_type = direct_adv->bdaddr_type; break; case HCI_EV_LE_EXT_ADV_REPORT: bacpy(&hdev->wake_addr, &ext_adv->bdaddr); hdev->wake_addr_type = ext_adv->bdaddr_type; break; } } } else { hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED; } unlock: hci_dev_unlock(hdev); } #define HCI_EV_VL(_op, _func, _min_len, _max_len) \ [_op] = { \ .req = false, \ .func = _func, \ .min_len = _min_len, \ .max_len = _max_len, \ } #define HCI_EV(_op, _func, _len) \ HCI_EV_VL(_op, _func, _len, _len) #define HCI_EV_STATUS(_op, _func) \ HCI_EV(_op, _func, sizeof(struct hci_ev_status)) #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ [_op] = { \ .req = true, \ .func_req = _func, \ .min_len = _min_len, \ .max_len = _max_len, \ } #define HCI_EV_REQ(_op, _func, _len) \ HCI_EV_REQ_VL(_op, _func, _len, _len) /* Entries in this table shall have their position according to the event opcode * they handle so the use of the macros above is recommend since it does attempt * to initialize at its proper index using Designated Initializers that way * events without a callback function don't have entered. 
*/ static const struct hci_ev { bool req; union { void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); void (*func_req)(struct hci_dev *hdev, void *data, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb); }; u16 min_len; u16 max_len; } hci_ev_table[U8_MAX + 1] = { /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), /* [0x02 = HCI_EV_INQUIRY_RESULT] */ HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), /* [0x03 = HCI_EV_CONN_COMPLETE] */ HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, sizeof(struct hci_ev_conn_complete)), /* [0x04 = HCI_EV_CONN_REQUEST] */ HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, sizeof(struct hci_ev_conn_request)), /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, sizeof(struct hci_ev_disconn_complete)), /* [0x06 = HCI_EV_AUTH_COMPLETE] */ HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, sizeof(struct hci_ev_auth_complete)), /* [0x07 = HCI_EV_REMOTE_NAME] */ HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, sizeof(struct hci_ev_remote_name)), /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, sizeof(struct hci_ev_encrypt_change)), /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, hci_change_link_key_complete_evt, sizeof(struct hci_ev_change_link_key_complete)), /* [0x0b = HCI_EV_REMOTE_FEATURES] */ HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, sizeof(struct hci_ev_remote_features)), /* [0x0e = HCI_EV_CMD_COMPLETE] */ HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), /* [0x0f = HCI_EV_CMD_STATUS] */ HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, sizeof(struct hci_ev_cmd_status)), /* [0x10 = HCI_EV_CMD_STATUS] */ HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, sizeof(struct hci_ev_hardware_error)), /* [0x12 = HCI_EV_ROLE_CHANGE] */ HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, sizeof(struct hci_ev_role_change)), /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), /* [0x14 = HCI_EV_MODE_CHANGE] */ HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, sizeof(struct hci_ev_mode_change)), /* [0x16 = HCI_EV_PIN_CODE_REQ] */ HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, sizeof(struct hci_ev_pin_code_req)), /* [0x17 = HCI_EV_LINK_KEY_REQ] */ HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, sizeof(struct hci_ev_link_key_req)), /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, sizeof(struct hci_ev_link_key_notify)), /* [0x1c = HCI_EV_CLOCK_OFFSET] */ HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, sizeof(struct hci_ev_clock_offset)), /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, sizeof(struct hci_ev_pkt_type_change)), /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, sizeof(struct hci_ev_pscan_rep_mode)), /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, hci_inquiry_result_with_rssi_evt, sizeof(struct hci_ev_inquiry_result_rssi), HCI_MAX_EVENT_SIZE), /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, sizeof(struct 
hci_ev_remote_ext_features)), /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, sizeof(struct hci_ev_sync_conn_complete)), /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, hci_extended_inquiry_result_evt, sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, sizeof(struct hci_ev_key_refresh_complete)), /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, sizeof(struct hci_ev_io_capa_request)), /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, sizeof(struct hci_ev_io_capa_reply)), /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, sizeof(struct hci_ev_user_confirm_req)), /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, sizeof(struct hci_ev_user_passkey_req)), /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, sizeof(struct hci_ev_remote_oob_data_request)), /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, sizeof(struct hci_ev_simple_pair_complete)), /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, sizeof(struct hci_ev_user_passkey_notify)), /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, sizeof(struct hci_ev_keypress_notify)), /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, sizeof(struct hci_ev_remote_host_features)), /* [0x3e = HCI_EV_LE_META] */ HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), #if IS_ENABLED(CONFIG_BT_HS) /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, sizeof(struct hci_ev_phy_link_complete)), /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, sizeof(struct hci_ev_channel_selected)), /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, hci_disconn_loglink_complete_evt, sizeof(struct hci_ev_disconn_logical_link_complete)), /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, sizeof(struct hci_ev_logical_link_complete)), /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, hci_disconn_phylink_complete_evt, sizeof(struct hci_ev_disconn_phy_link_complete)), #endif /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, sizeof(struct hci_ev_num_comp_blocks)), /* [0xff = HCI_EV_VENDOR] */ HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE), }; static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, u16 *opcode, u8 *status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { const struct hci_ev *ev = &hci_ev_table[event]; void *data; if (!ev->func) return; if (skb->len < ev->min_len) { bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", event, skb->len, ev->min_len); return; } /* Just warn if the length is over max_len size it still be * possible to partially parse the event so leave to callback to * decide if 
that is acceptable. */ if (skb->len > ev->max_len) bt_dev_warn_ratelimited(hdev, "unexpected event 0x%2.2x length: %u > %u", event, skb->len, ev->max_len); data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); if (!data) return; if (ev->req) ev->func_req(hdev, data, skb, opcode, status, req_complete, req_complete_skb); else ev->func(hdev, data, skb); } void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *) skb->data; hci_req_complete_t req_complete = NULL; hci_req_complete_skb_t req_complete_skb = NULL; struct sk_buff *orig_skb = NULL; u8 status = 0, event, req_evt = 0; u16 opcode = HCI_OP_NOP; if (skb->len < sizeof(*hdr)) { bt_dev_err(hdev, "Malformed HCI Event"); goto done; } kfree_skb(hdev->recv_event); hdev->recv_event = skb_clone(skb, GFP_KERNEL); event = hdr->evt; if (!event) { bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", event); goto done; } /* Only match event if command OGF is not for LE */ if (hdev->sent_cmd && hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && hci_skb_event(hdev->sent_cmd) == event) { hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), status, &req_complete, &req_complete_skb); req_evt = event; } /* If it looks like we might end up having to call * req_complete_skb, store a pristine copy of the skb since the * various handlers may modify the original one through * skb_pull() calls, etc. */ if (req_complete_skb || event == HCI_EV_CMD_STATUS || event == HCI_EV_CMD_COMPLETE) orig_skb = skb_clone(skb, GFP_KERNEL); skb_pull(skb, HCI_EVENT_HDR_SIZE); /* Store wake reason if we're suspended */ hci_store_wake_reason(hdev, event, skb); bt_dev_dbg(hdev, "event 0x%2.2x", event); hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, &req_complete_skb); if (req_complete) { req_complete(hdev, status, opcode); } else if (req_complete_skb) { if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { kfree_skb(orig_skb); orig_skb = NULL; } req_complete_skb(hdev, status, opcode, orig_skb); } done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++; }
linux-master
net/bluetooth/hci_event.c
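/*
 * Illustrative sketch (not part of hci_event.c above): a minimal,
 * self-contained user-space model of the dispatch pattern used by
 * hci_ev_table/hci_le_ev_table, where designated initializers place each
 * handler at the index of the event opcode it serves and the dispatcher
 * validates the payload length before calling it.  All names here
 * (demo_*) are hypothetical and exist only for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_EV_FOO 0x01
#define DEMO_EV_BAR 0x02

struct demo_ev_foo { uint8_t status; uint8_t handle; };
struct demo_ev_bar { uint8_t status; };

struct demo_ev {
	void (*func)(const void *data, size_t len);
	size_t min_len;
	size_t max_len;
};

static void demo_foo_evt(const void *data, size_t len)
{
	const struct demo_ev_foo *ev = data;

	printf("foo: status 0x%02x handle 0x%02x (len %zu)\n",
	       ev->status, ev->handle, len);
}

static void demo_bar_evt(const void *data, size_t len)
{
	const struct demo_ev_bar *ev = data;

	printf("bar: status 0x%02x (len %zu)\n", ev->status, len);
}

/* Entries land at the index equal to their opcode; gaps stay NULL. */
static const struct demo_ev demo_ev_table[UINT8_MAX + 1] = {
	[DEMO_EV_FOO] = { demo_foo_evt, sizeof(struct demo_ev_foo),
			  sizeof(struct demo_ev_foo) },
	[DEMO_EV_BAR] = { demo_bar_evt, sizeof(struct demo_ev_bar),
			  sizeof(struct demo_ev_bar) },
};

static void demo_event_func(uint8_t event, const void *data, size_t len)
{
	const struct demo_ev *ev = &demo_ev_table[event];

	if (!ev->func)
		return;			/* no handler registered */

	if (len < ev->min_len) {
		fprintf(stderr, "event 0x%02x too short: %zu < %zu\n",
			event, len, ev->min_len);
		return;
	}

	/* Over-long events are only warned about, as in hci_event_func(). */
	if (len > ev->max_len)
		fprintf(stderr, "event 0x%02x longer than expected\n", event);

	ev->func(data, len);
}

int main(void)
{
	struct demo_ev_foo foo = { .status = 0x00, .handle = 0x2a };

	demo_event_func(DEMO_EV_FOO, &foo, sizeof(foo));
	demo_event_func(0x7f, &foo, sizeof(foo));	/* silently ignored */
	return 0;
}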
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2015 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_mon.h> #include <net/bluetooth/mgmt.h> #include "mgmt_util.h" static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie, u16 opcode, u16 len, void *buf) { struct hci_mon_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(6 + len, GFP_ATOMIC); if (!skb) return NULL; put_unaligned_le32(cookie, skb_put(skb, 4)); put_unaligned_le16(opcode, skb_put(skb, 2)); if (buf) skb_put_data(skb, buf, len); __net_timestamp(skb); hdr = skb_push(skb, HCI_MON_HDR_SIZE); hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT); hdr->index = index; hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); return skb; } struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode, unsigned int size) { struct sk_buff *skb; skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL); if (!skb) return skb; skb_reserve(skb, sizeof(struct mgmt_hdr)); bt_cb(skb)->mgmt.hdev = hdev; bt_cb(skb)->mgmt.opcode = opcode; return skb; } int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, struct sock *skip_sk) { struct hci_dev *hdev; struct mgmt_hdr *hdr; int len; if (!skb) return -EINVAL; len = skb->len; hdev = bt_cb(skb)->mgmt.hdev; /* Time stamp */ __net_timestamp(skb); /* Send just the data, without headers, to the monitor */ if (channel == HCI_CHANNEL_CONTROL) hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode, skb->data, skb->len, skb_get_ktime(skb), flag, skip_sk); hdr = skb_push(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode); if (hdev) hdr->index = cpu_to_le16(hdev->id); else hdr->index = cpu_to_le16(MGMT_INDEX_NONE); hdr->len = cpu_to_le16(len); hci_send_to_channel(channel, skb, flag, skip_sk); kfree_skb(skb); return 0; } int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, void *data, u16 data_len, int flag, struct sock *skip_sk) { struct sk_buff *skb; skb = mgmt_alloc_skb(hdev, event, data_len); if (!skb) return -ENOMEM; if (data) skb_put_data(skb, data, data_len); return mgmt_send_event_skb(channel, skb, flag, skip_sk); } int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { struct sk_buff *skb, *mskb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_status *ev; int err; BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); if (!skb) return -ENOMEM; hdr = skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); hdr->index = 
cpu_to_le16(index); hdr->len = cpu_to_le16(sizeof(*ev)); ev = skb_put(skb, sizeof(*ev)); ev->status = status; ev->opcode = cpu_to_le16(cmd); mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk), MGMT_EV_CMD_STATUS, sizeof(*ev), ev); if (mskb) skb->tstamp = mskb->tstamp; else __net_timestamp(skb); err = sock_queue_rcv_skb(sk, skb); if (err < 0) kfree_skb(skb); if (mskb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb, HCI_SOCK_TRUSTED, NULL); kfree_skb(mskb); } return err; } int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, void *rp, size_t rp_len) { struct sk_buff *skb, *mskb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_complete *ev; int err; BT_DBG("sock %p", sk); skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); hdr->index = cpu_to_le16(index); hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); ev = skb_put(skb, sizeof(*ev) + rp_len); ev->opcode = cpu_to_le16(cmd); ev->status = status; if (rp) memcpy(ev->data, rp, rp_len); mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk), MGMT_EV_CMD_COMPLETE, sizeof(*ev) + rp_len, ev); if (mskb) skb->tstamp = mskb->tstamp; else __net_timestamp(skb); err = sock_queue_rcv_skb(sk, skb); if (err < 0) kfree_skb(skb); if (mskb) { hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb, HCI_SOCK_TRUSTED, NULL); kfree_skb(mskb); } return err; } struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (hci_sock_get_channel(cmd->sk) != channel) continue; if (cmd->opcode == opcode) return cmd; } return NULL; } struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, u16 opcode, struct hci_dev *hdev, const void *data) { struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->user_data != data) continue; if (cmd->opcode == opcode) return cmd; } return NULL; } void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data) { struct mgmt_pending_cmd *cmd, *tmp; list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { if (opcode > 0 && cmd->opcode != opcode) continue; cb(cmd, data); } } struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_pending_cmd *cmd; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return NULL; cmd->opcode = opcode; cmd->index = hdev->id; cmd->param = kmemdup(data, len, GFP_KERNEL); if (!cmd->param) { kfree(cmd); return NULL; } cmd->param_len = len; cmd->sk = sk; sock_hold(sk); return cmd; } struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_pending_cmd *cmd; cmd = mgmt_pending_new(sk, opcode, hdev, data, len); if (!cmd) return NULL; list_add_tail(&cmd->list, &hdev->mgmt_pending); return cmd; } void mgmt_pending_free(struct mgmt_pending_cmd *cmd) { sock_put(cmd->sk); kfree(cmd->param); kfree(cmd); } void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) { list_del(&cmd->list); mgmt_pending_free(cmd); } void mgmt_mesh_foreach(struct hci_dev *hdev, void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), void *data, struct sock *sk) { struct mgmt_mesh_tx *mesh_tx, *tmp; list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) { if (!sk || mesh_tx->sk == sk) cb(mesh_tx, data); } } 
struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk) { struct mgmt_mesh_tx *mesh_tx; if (list_empty(&hdev->mesh_pending)) return NULL; list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { if (!sk || mesh_tx->sk == sk) return mesh_tx; } return NULL; } struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle) { struct mgmt_mesh_tx *mesh_tx; if (list_empty(&hdev->mesh_pending)) return NULL; list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { if (mesh_tx->handle == handle) return mesh_tx; } return NULL; } struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mesh_tx *mesh_tx; mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL); if (!mesh_tx) return NULL; hdev->mesh_send_ref++; if (!hdev->mesh_send_ref) hdev->mesh_send_ref++; mesh_tx->handle = hdev->mesh_send_ref; mesh_tx->index = hdev->id; memcpy(mesh_tx->param, data, len); mesh_tx->param_len = len; mesh_tx->sk = sk; sock_hold(sk); list_add_tail(&mesh_tx->list, &hdev->mesh_pending); return mesh_tx; } void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx) { list_del(&mesh_tx->list); sock_put(mesh_tx->sk); kfree(mesh_tx); }
linux-master
net/bluetooth/mgmt_util.c
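/*
 * Illustrative sketch (not part of mgmt_util.c above): a small user-space
 * model of the pending-command bookkeeping that mgmt_pending_add(),
 * mgmt_pending_find() and mgmt_pending_remove() follow: each command keeps
 * a private copy of its parameters, sits on a per-device list and is looked
 * up by opcode until it is removed and freed.  All names (pend_*) are
 * hypothetical; the kernel uses struct list_head and socket references,
 * which this sketch omits.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pend_cmd {
	struct pend_cmd *next;
	uint16_t opcode;
	void *param;
	size_t param_len;
};

struct pend_dev {
	struct pend_cmd *pending;	/* head of the pending list */
};

static struct pend_cmd *pend_add(struct pend_dev *dev, uint16_t opcode,
				 const void *data, size_t len)
{
	struct pend_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return NULL;

	/* Keep a private copy of the parameters, as the kernel code does. */
	cmd->param = malloc(len);
	if (!cmd->param) {
		free(cmd);
		return NULL;
	}
	memcpy(cmd->param, data, len);
	cmd->param_len = len;
	cmd->opcode = opcode;

	cmd->next = dev->pending;
	dev->pending = cmd;
	return cmd;
}

static struct pend_cmd *pend_find(struct pend_dev *dev, uint16_t opcode)
{
	struct pend_cmd *cmd;

	for (cmd = dev->pending; cmd; cmd = cmd->next)
		if (cmd->opcode == opcode)
			return cmd;
	return NULL;
}

static void pend_remove(struct pend_dev *dev, struct pend_cmd *victim)
{
	struct pend_cmd **link;

	for (link = &dev->pending; *link; link = &(*link)->next) {
		if (*link == victim) {
			*link = victim->next;
			free(victim->param);
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct pend_dev dev = { NULL };
	uint8_t params[] = { 0x01, 0x02 };

	pend_add(&dev, 0x0005, params, sizeof(params));
	printf("found: %s\n", pend_find(&dev, 0x0005) ? "yes" : "no");
	pend_remove(&dev, pend_find(&dev, 0x0005));
	printf("after remove: %s\n", pend_find(&dev, 0x0005) ? "yes" : "no");
	return 0;
}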
/* CMTP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002-2003 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/export.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/wait.h> #include <linux/kthread.h> #include <net/sock.h> #include <linux/isdn/capilli.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> #include "cmtp.h" #define CAPI_INTEROPERABILITY 0x20 #define CAPI_INTEROPERABILITY_REQ CAPICMD(CAPI_INTEROPERABILITY, CAPI_REQ) #define CAPI_INTEROPERABILITY_CONF CAPICMD(CAPI_INTEROPERABILITY, CAPI_CONF) #define CAPI_INTEROPERABILITY_IND CAPICMD(CAPI_INTEROPERABILITY, CAPI_IND) #define CAPI_INTEROPERABILITY_RESP CAPICMD(CAPI_INTEROPERABILITY, CAPI_RESP) #define CAPI_INTEROPERABILITY_REQ_LEN (CAPI_MSG_BASELEN + 2) #define CAPI_INTEROPERABILITY_CONF_LEN (CAPI_MSG_BASELEN + 4) #define CAPI_INTEROPERABILITY_IND_LEN (CAPI_MSG_BASELEN + 2) #define CAPI_INTEROPERABILITY_RESP_LEN (CAPI_MSG_BASELEN + 2) #define CAPI_FUNCTION_REGISTER 0 #define CAPI_FUNCTION_RELEASE 1 #define CAPI_FUNCTION_GET_PROFILE 2 #define CAPI_FUNCTION_GET_MANUFACTURER 3 #define CAPI_FUNCTION_GET_VERSION 4 #define CAPI_FUNCTION_GET_SERIAL_NUMBER 5 #define CAPI_FUNCTION_MANUFACTURER 6 #define CAPI_FUNCTION_LOOPBACK 7 #define CMTP_MSGNUM 1 #define CMTP_APPLID 2 #define CMTP_MAPPING 3 static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl) { struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL); BT_DBG("session %p application %p appl %u", session, app, appl); if (!app) return NULL; app->state = BT_OPEN; app->appl = appl; list_add_tail(&app->list, &session->applications); return app; } static void cmtp_application_del(struct cmtp_session *session, struct cmtp_application *app) { BT_DBG("session %p application %p", session, app); if (app) { list_del(&app->list); kfree(app); } } static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value) { struct cmtp_application *app; list_for_each_entry(app, &session->applications, list) { switch (pattern) { case CMTP_MSGNUM: if (app->msgnum == value) return app; break; case CMTP_APPLID: if (app->appl == value) return app; break; case CMTP_MAPPING: if (app->mapping == value) return app; break; } } return NULL; } static int cmtp_msgnum_get(struct cmtp_session 
*session) { session->msgnum++; if ((session->msgnum & 0xff) > 200) session->msgnum = CMTP_INITIAL_MSGNUM + 1; return session->msgnum; } static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb) { struct cmtp_scb *scb = (void *) skb->cb; BT_DBG("session %p skb %p len %u", session, skb, skb->len); scb->id = -1; scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3); skb_queue_tail(&session->transmit, skb); wake_up_interruptible(sk_sleep(session->sock->sk)); } static void cmtp_send_interopmsg(struct cmtp_session *session, __u8 subcmd, __u16 appl, __u16 msgnum, __u16 function, unsigned char *buf, int len) { struct sk_buff *skb; unsigned char *s; BT_DBG("session %p subcmd 0x%02x appl %u msgnum %u", session, subcmd, appl, msgnum); skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for interoperability packet"); return; } s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len); capimsg_setu16(s, 0, CAPI_MSG_BASELEN + 6 + len); capimsg_setu16(s, 2, appl); capimsg_setu8 (s, 4, CAPI_INTEROPERABILITY); capimsg_setu8 (s, 5, subcmd); capimsg_setu16(s, 6, msgnum); /* Interoperability selector (Bluetooth Device Management) */ capimsg_setu16(s, 8, 0x0001); capimsg_setu8 (s, 10, 3 + len); capimsg_setu16(s, 11, function); capimsg_setu8 (s, 13, len); if (len > 0) memcpy(s + 14, buf, len); cmtp_send_capimsg(session, skb); } static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb) { struct capi_ctr *ctrl = &session->ctrl; struct cmtp_application *application; __u16 appl, msgnum, func, info; __u32 controller; BT_DBG("session %p skb %p len %u", session, skb, skb->len); switch (CAPIMSG_SUBCOMMAND(skb->data)) { case CAPI_CONF: if (skb->len < CAPI_MSG_BASELEN + 10) break; func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); switch (func) { case CAPI_FUNCTION_REGISTER: msgnum = CAPIMSG_MSGID(skb->data); application = cmtp_application_get(session, CMTP_MSGNUM, msgnum); if (application) { application->state = BT_CONNECTED; application->msgnum = 0; application->mapping = CAPIMSG_APPID(skb->data); wake_up_interruptible(&session->wait); } break; case CAPI_FUNCTION_RELEASE: appl = CAPIMSG_APPID(skb->data); application = cmtp_application_get(session, CMTP_MAPPING, appl); if (application) { application->state = BT_CLOSED; application->msgnum = 0; wake_up_interruptible(&session->wait); } break; case CAPI_FUNCTION_GET_PROFILE: if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) break; controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); msgnum = CAPIMSG_MSGID(skb->data); if (!info && (msgnum == CMTP_INITIAL_MSGNUM)) { session->ncontroller = controller; wake_up_interruptible(&session->wait); break; } if (!info && ctrl) { memcpy(&ctrl->profile, skb->data + CAPI_MSG_BASELEN + 11, sizeof(capi_profile)); session->state = BT_CONNECTED; capi_ctr_ready(ctrl); } break; case CAPI_FUNCTION_GET_MANUFACTURER: if (skb->len < CAPI_MSG_BASELEN + 15) break; if (!info && ctrl) { int len = min_t(uint, CAPI_MANUFACTURER_LEN, skb->data[CAPI_MSG_BASELEN + 14]); memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); strncpy(ctrl->manu, skb->data + CAPI_MSG_BASELEN + 15, len); } break; case CAPI_FUNCTION_GET_VERSION: if (skb->len < CAPI_MSG_BASELEN + 32) break; if (!info && ctrl) { ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16); ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20); ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN 
+ 24); ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28); } break; case CAPI_FUNCTION_GET_SERIAL_NUMBER: if (skb->len < CAPI_MSG_BASELEN + 17) break; if (!info && ctrl) { int len = min_t(uint, CAPI_SERIAL_LEN, skb->data[CAPI_MSG_BASELEN + 16]); memset(ctrl->serial, 0, CAPI_SERIAL_LEN); strncpy(ctrl->serial, skb->data + CAPI_MSG_BASELEN + 17, len); } break; } break; case CAPI_IND: if (skb->len < CAPI_MSG_BASELEN + 6) break; func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); if (func == CAPI_FUNCTION_LOOPBACK) { int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6, skb->data[CAPI_MSG_BASELEN + 5]); appl = CAPIMSG_APPID(skb->data); msgnum = CAPIMSG_MSGID(skb->data); cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, skb->data + CAPI_MSG_BASELEN + 6, len); } break; } kfree_skb(skb); } void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) { struct capi_ctr *ctrl = &session->ctrl; struct cmtp_application *application; __u16 appl; __u32 contr; BT_DBG("session %p skb %p len %u", session, skb, skb->len); if (skb->len < CAPI_MSG_BASELEN) return; if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { cmtp_recv_interopmsg(session, skb); return; } if (session->flags & BIT(CMTP_LOOPBACK)) { kfree_skb(skb); return; } appl = CAPIMSG_APPID(skb->data); contr = CAPIMSG_CONTROL(skb->data); application = cmtp_application_get(session, CMTP_MAPPING, appl); if (application) { appl = application->appl; CAPIMSG_SETAPPID(skb->data, appl); } else { BT_ERR("Can't find application with id %u", appl); kfree_skb(skb); return; } if ((contr & 0x7f) == 0x01) { contr = (contr & 0xffffff80) | session->num; CAPIMSG_SETCONTROL(skb->data, contr); } capi_ctr_handle_message(ctrl, appl, skb); } static int cmtp_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) { BT_DBG("ctrl %p data %p", ctrl, data); return 0; } static void cmtp_reset_ctr(struct capi_ctr *ctrl) { struct cmtp_session *session = ctrl->driverdata; BT_DBG("ctrl %p", ctrl); capi_ctr_down(ctrl); atomic_inc(&session->terminate); wake_up_process(session->task); } static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) { DECLARE_WAITQUEUE(wait, current); struct cmtp_session *session = ctrl->driverdata; struct cmtp_application *application; unsigned long timeo = CMTP_INTEROP_TIMEOUT; unsigned char buf[8]; int err = 0, nconn, want = rp->level3cnt; BT_DBG("ctrl %p appl %u level3cnt %u datablkcnt %u datablklen %u", ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); application = cmtp_application_add(session, appl); if (!application) { BT_ERR("Can't allocate memory for new application"); return; } if (want < 0) nconn = ctrl->profile.nbchannel * -want; else nconn = want; if (nconn == 0) nconn = ctrl->profile.nbchannel; capimsg_setu16(buf, 0, nconn); capimsg_setu16(buf, 2, rp->datablkcnt); capimsg_setu16(buf, 4, rp->datablklen); application->state = BT_CONFIG; application->msgnum = cmtp_msgnum_get(session); cmtp_send_interopmsg(session, CAPI_REQ, 0x0000, application->msgnum, CAPI_FUNCTION_REGISTER, buf, 6); add_wait_queue(&session->wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { err = -EAGAIN; break; } if (application->state == BT_CLOSED) { err = -application->err; break; } if (application->state == BT_CONNECTED) break; if (signal_pending(current)) { err = -EINTR; break; } timeo = schedule_timeout(timeo); } set_current_state(TASK_RUNNING); remove_wait_queue(&session->wait, &wait); if (err) { cmtp_application_del(session, application); 
return; } } static void cmtp_release_appl(struct capi_ctr *ctrl, __u16 appl) { struct cmtp_session *session = ctrl->driverdata; struct cmtp_application *application; BT_DBG("ctrl %p appl %u", ctrl, appl); application = cmtp_application_get(session, CMTP_APPLID, appl); if (!application) { BT_ERR("Can't find application"); return; } application->msgnum = cmtp_msgnum_get(session); cmtp_send_interopmsg(session, CAPI_REQ, application->mapping, application->msgnum, CAPI_FUNCTION_RELEASE, NULL, 0); wait_event_interruptible_timeout(session->wait, (application->state == BT_CLOSED), CMTP_INTEROP_TIMEOUT); cmtp_application_del(session, application); } static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) { struct cmtp_session *session = ctrl->driverdata; struct cmtp_application *application; __u16 appl; __u32 contr; BT_DBG("ctrl %p skb %p", ctrl, skb); appl = CAPIMSG_APPID(skb->data); contr = CAPIMSG_CONTROL(skb->data); application = cmtp_application_get(session, CMTP_APPLID, appl); if ((!application) || (application->state != BT_CONNECTED)) { BT_ERR("Can't find application with id %u", appl); return CAPI_ILLAPPNR; } CAPIMSG_SETAPPID(skb->data, application->mapping); if ((contr & 0x7f) == session->num) { contr = (contr & 0xffffff80) | 0x01; CAPIMSG_SETCONTROL(skb->data, contr); } cmtp_send_capimsg(session, skb); return CAPI_NOERROR; } static char *cmtp_procinfo(struct capi_ctr *ctrl) { return "CAPI Message Transport Protocol"; } static int cmtp_proc_show(struct seq_file *m, void *v) { struct capi_ctr *ctrl = m->private; struct cmtp_session *session = ctrl->driverdata; struct cmtp_application *app; seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl)); seq_printf(m, "addr %s\n", session->name); seq_printf(m, "ctrl %d\n", session->num); list_for_each_entry(app, &session->applications, list) { seq_printf(m, "appl %u -> %u\n", app->appl, app->mapping); } return 0; } int cmtp_attach_device(struct cmtp_session *session) { unsigned char buf[4]; long ret; BT_DBG("session %p", session); capimsg_setu32(buf, 0, 0); cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, CMTP_INITIAL_MSGNUM, CAPI_FUNCTION_GET_PROFILE, buf, 4); ret = wait_event_interruptible_timeout(session->wait, session->ncontroller, CMTP_INTEROP_TIMEOUT); BT_INFO("Found %d CAPI controller(s) on device %s", session->ncontroller, session->name); if (!ret) return -ETIMEDOUT; if (!session->ncontroller) return -ENODEV; if (session->ncontroller > 1) BT_INFO("Setting up only CAPI controller 1"); session->ctrl.owner = THIS_MODULE; session->ctrl.driverdata = session; strcpy(session->ctrl.name, session->name); session->ctrl.driver_name = "cmtp"; session->ctrl.load_firmware = cmtp_load_firmware; session->ctrl.reset_ctr = cmtp_reset_ctr; session->ctrl.register_appl = cmtp_register_appl; session->ctrl.release_appl = cmtp_release_appl; session->ctrl.send_message = cmtp_send_message; session->ctrl.procinfo = cmtp_procinfo; session->ctrl.proc_show = cmtp_proc_show; if (attach_capi_ctr(&session->ctrl) < 0) { BT_ERR("Can't attach new controller"); return -EBUSY; } session->num = session->ctrl.cnr; BT_DBG("session %p num %d", session, session->num); capimsg_setu32(buf, 0, 1); cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), CAPI_FUNCTION_GET_MANUFACTURER, buf, 4); cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), CAPI_FUNCTION_GET_VERSION, buf, 4); cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), CAPI_FUNCTION_GET_SERIAL_NUMBER, buf, 4); cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, 
cmtp_msgnum_get(session), CAPI_FUNCTION_GET_PROFILE, buf, 4); return 0; } void cmtp_detach_device(struct cmtp_session *session) { BT_DBG("session %p", session); detach_capi_ctr(&session->ctrl); }
linux-master
net/bluetooth/cmtp/capi.c
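/*
 * Illustrative sketch (not part of capi.c above): how the interoperability
 * request built by cmtp_send_interopmsg() lays out its little-endian
 * fields.  The put8()/put16() helpers stand in for the kernel's
 * capimsg_setu8()/capimsg_setu16() and are hypothetical; the field offsets
 * mirror the ones used in that function, with CAPI_MSG_BASELEN assumed to
 * be 8 and the subcommand value assumed to be CAPI_REQ (0x80).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BASELEN 8	/* assumed value of CAPI_MSG_BASELEN */

static void put8(uint8_t *buf, int off, uint8_t v)  { buf[off] = v; }

static void put16(uint8_t *buf, int off, uint16_t v)
{
	buf[off] = v & 0xff;
	buf[off + 1] = v >> 8;
}

static size_t build_interop_req(uint8_t *buf, uint16_t appl, uint16_t msgnum,
				uint16_t function,
				const uint8_t *data, uint8_t len)
{
	size_t total = BASELEN + 6 + len;

	put16(buf, 0, (uint16_t)total);	/* total CAPI message length */
	put16(buf, 2, appl);		/* application id */
	put8(buf, 4, 0x20);		/* CAPI_INTEROPERABILITY command */
	put8(buf, 5, 0x80);		/* subcommand, assumed CAPI_REQ */
	put16(buf, 6, msgnum);		/* message number */
	put16(buf, 8, 0x0001);		/* selector: Bluetooth Device Mgmt */
	put8(buf, 10, 3 + len);		/* structure length */
	put16(buf, 11, function);	/* requested function */
	put8(buf, 13, len);		/* payload length */
	if (len)
		memcpy(buf + 14, data, len);
	return total;
}

int main(void)
{
	uint8_t msg[64], payload[4] = { 0, 0, 0, 0 };
	size_t i, n;

	/* Function 2 corresponds to CAPI_FUNCTION_GET_PROFILE above. */
	n = build_interop_req(msg, 0xffff, 1, 2, payload, sizeof(payload));
	for (i = 0; i < n; i++)
		printf("%02x ", msg[i]);
	printf("\n");
	return 0;
}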
/* CMTP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002-2003 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/freezer.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/init.h> #include <linux/kthread.h> #include <net/sock.h> #include <linux/isdn/capilli.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/l2cap.h> #include "cmtp.h" #define VERSION "1.0" static DECLARE_RWSEM(cmtp_session_sem); static LIST_HEAD(cmtp_session_list); static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) { struct cmtp_session *session; BT_DBG(""); list_for_each_entry(session, &cmtp_session_list, list) if (!bacmp(bdaddr, &session->bdaddr)) return session; return NULL; } static void __cmtp_link_session(struct cmtp_session *session) { list_add(&session->list, &cmtp_session_list); } static void __cmtp_unlink_session(struct cmtp_session *session) { list_del(&session->list); } static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) { u32 valid_flags = BIT(CMTP_LOOPBACK); memset(ci, 0, sizeof(*ci)); bacpy(&ci->bdaddr, &session->bdaddr); ci->flags = session->flags & valid_flags; ci->state = session->state; ci->num = session->num; } static inline int cmtp_alloc_block_id(struct cmtp_session *session) { int i, id = -1; for (i = 0; i < 16; i++) if (!test_and_set_bit(i, &session->blockids)) { id = i; break; } return id; } static inline void cmtp_free_block_id(struct cmtp_session *session, int id) { clear_bit(id, &session->blockids); } static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const unsigned char *buf, int count) { struct sk_buff *skb = session->reassembly[id], *nskb; int size; BT_DBG("session %p buf %p count %d", session, buf, count); size = (skb) ? 
skb->len + count : count; nskb = alloc_skb(size, GFP_ATOMIC); if (!nskb) { BT_ERR("Can't allocate memory for CAPI message"); return; } if (skb && (skb->len > 0)) skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); skb_put_data(nskb, buf, count); session->reassembly[id] = nskb; kfree_skb(skb); } static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb) { __u8 hdr, hdrlen, id; __u16 len; BT_DBG("session %p skb %p len %d", session, skb, skb->len); while (skb->len > 0) { hdr = skb->data[0]; switch (hdr & 0xc0) { case 0x40: hdrlen = 2; len = skb->data[1]; break; case 0x80: hdrlen = 3; len = skb->data[1] | (skb->data[2] << 8); break; default: hdrlen = 1; len = 0; break; } id = (hdr & 0x3c) >> 2; BT_DBG("hdr 0x%02x hdrlen %d len %d id %d", hdr, hdrlen, len, id); if (hdrlen + len > skb->len) { BT_ERR("Wrong size or header information in CMTP frame"); break; } if (len == 0) { skb_pull(skb, hdrlen); continue; } switch (hdr & 0x03) { case 0x00: cmtp_add_msgpart(session, id, skb->data + hdrlen, len); cmtp_recv_capimsg(session, session->reassembly[id]); session->reassembly[id] = NULL; break; case 0x01: cmtp_add_msgpart(session, id, skb->data + hdrlen, len); break; default: kfree_skb(session->reassembly[id]); session->reassembly[id] = NULL; break; } skb_pull(skb, hdrlen + len); } kfree_skb(skb); return 0; } static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len) { struct socket *sock = session->sock; struct kvec iv = { data, len }; struct msghdr msg; BT_DBG("session %p data %p len %d", session, data, len); if (!len) return 0; memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, &iv, 1, len); } static void cmtp_process_transmit(struct cmtp_session *session) { struct sk_buff *skb, *nskb; unsigned char *hdr; unsigned int size, tail; BT_DBG("session %p", session); nskb = alloc_skb(session->mtu, GFP_ATOMIC); if (!nskb) { BT_ERR("Can't allocate memory for new frame"); return; } while ((skb = skb_dequeue(&session->transmit))) { struct cmtp_scb *scb = (void *) skb->cb; tail = session->mtu - nskb->len; if (tail < 5) { cmtp_send_frame(session, nskb->data, nskb->len); skb_trim(nskb, 0); tail = session->mtu; } size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); if (scb->id < 0) { scb->id = cmtp_alloc_block_id(session); if (scb->id < 0) { skb_queue_head(&session->transmit, skb); break; } } if (size < 256) { hdr = skb_put(nskb, 2); hdr[0] = 0x40 | ((scb->id << 2) & 0x3c) | ((skb->len == size) ? 0x00 : 0x01); hdr[1] = size; } else { hdr = skb_put(nskb, 3); hdr[0] = 0x80 | ((scb->id << 2) & 0x3c) | ((skb->len == size) ? 
0x00 : 0x01); hdr[1] = size & 0xff; hdr[2] = size >> 8; } skb_copy_from_linear_data(skb, skb_put(nskb, size), size); skb_pull(skb, size); if (skb->len > 0) { skb_queue_head(&session->transmit, skb); } else { cmtp_free_block_id(session, scb->id); if (scb->data) { cmtp_send_frame(session, nskb->data, nskb->len); skb_trim(nskb, 0); } kfree_skb(skb); } } cmtp_send_frame(session, nskb->data, nskb->len); kfree_skb(nskb); } static int cmtp_session(void *arg) { struct cmtp_session *session = arg; struct sock *sk = session->sock->sk; struct sk_buff *skb; DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG("session %p", session); set_user_nice(current, -15); add_wait_queue(sk_sleep(sk), &wait); while (1) { if (atomic_read(&session->terminate)) break; if (sk->sk_state != BT_CONNECTED) break; while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) cmtp_recv_frame(session, skb); else kfree_skb(skb); } cmtp_process_transmit(session); /* * wait_woken() performs the necessary memory barriers * for us; see the header comment for this primitive. */ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(sk_sleep(sk), &wait); down_write(&cmtp_session_sem); if (!(session->flags & BIT(CMTP_LOOPBACK))) cmtp_detach_device(session); fput(session->sock->file); __cmtp_unlink_session(session); up_write(&cmtp_session_sem); kfree(session); module_put_and_kthread_exit(0); return 0; } int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) { u32 valid_flags = BIT(CMTP_LOOPBACK); struct cmtp_session *session, *s; int i, err; BT_DBG(""); if (!l2cap_is_socket(sock)) return -EBADFD; if (req->flags & ~valid_flags) return -EINVAL; session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); if (!session) return -ENOMEM; down_write(&cmtp_session_sem); s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst); if (s && s->state == BT_CONNECTED) { err = -EEXIST; goto failed; } bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst); session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu, l2cap_pi(sock->sk)->chan->imtu); BT_DBG("mtu %d", session->mtu); sprintf(session->name, "%pMR", &session->bdaddr); session->sock = sock; session->state = BT_CONFIG; init_waitqueue_head(&session->wait); session->msgnum = CMTP_INITIAL_MSGNUM; INIT_LIST_HEAD(&session->applications); skb_queue_head_init(&session->transmit); for (i = 0; i < 16; i++) session->reassembly[i] = NULL; session->flags = req->flags; __cmtp_link_session(session); __module_get(THIS_MODULE); session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d", session->num); if (IS_ERR(session->task)) { module_put(THIS_MODULE); err = PTR_ERR(session->task); goto unlink; } if (!(session->flags & BIT(CMTP_LOOPBACK))) { err = cmtp_attach_device(session); if (err < 0) { /* Caller will call fput in case of failure, and so * will cmtp_session kthread. 
*/ get_file(session->sock->file); atomic_inc(&session->terminate); wake_up_interruptible(sk_sleep(session->sock->sk)); up_write(&cmtp_session_sem); return err; } } up_write(&cmtp_session_sem); return 0; unlink: __cmtp_unlink_session(session); failed: up_write(&cmtp_session_sem); kfree(session); return err; } int cmtp_del_connection(struct cmtp_conndel_req *req) { u32 valid_flags = 0; struct cmtp_session *session; int err = 0; BT_DBG(""); if (req->flags & ~valid_flags) return -EINVAL; down_read(&cmtp_session_sem); session = __cmtp_get_session(&req->bdaddr); if (session) { /* Flush the transmit queue */ skb_queue_purge(&session->transmit); /* Stop session thread */ atomic_inc(&session->terminate); /* * See the comment preceding the call to wait_woken() * in cmtp_session(). */ wake_up_interruptible(sk_sleep(session->sock->sk)); } else err = -ENOENT; up_read(&cmtp_session_sem); return err; } int cmtp_get_connlist(struct cmtp_connlist_req *req) { struct cmtp_session *session; int err = 0, n = 0; BT_DBG(""); down_read(&cmtp_session_sem); list_for_each_entry(session, &cmtp_session_list, list) { struct cmtp_conninfo ci; __cmtp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { err = -EFAULT; break; } if (++n >= req->cnum) break; req->ci++; } req->cnum = n; up_read(&cmtp_session_sem); return err; } int cmtp_get_conninfo(struct cmtp_conninfo *ci) { struct cmtp_session *session; int err = 0; down_read(&cmtp_session_sem); session = __cmtp_get_session(&ci->bdaddr); if (session) __cmtp_copy_session(session, ci); else err = -ENOENT; up_read(&cmtp_session_sem); return err; } static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); return cmtp_init_sockets(); } static void __exit cmtp_exit(void) { cmtp_cleanup_sockets(); } module_init(cmtp_init); module_exit(cmtp_exit); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth CMTP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-5");
linux-master
net/bluetooth/cmtp/core.c
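The CMTP core above stops its per-connection kthread with a simple pattern: cmtp_del_connection() purges the transmit queue, sets an atomic terminate flag, and wakes the thread sleeping in wait_woken(); the thread re-checks the flag at the top of its loop and exits cleanly. Below is a minimal user-space sketch of the same shutdown pattern using pthreads and a condition variable in place of wait_woken()/wake_up_interruptible(). The session struct and function names are illustrative only, not part of the kernel code; build with cc -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for struct cmtp_session: a worker that drains
 * pending work, sleeps until woken, and re-checks a terminate flag at
 * the top of every iteration. */
struct session {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	atomic_int terminate;
	int pending;		/* stand-in for the transmit queue */
};

static void *session_thread(void *arg)
{
	struct session *s = arg;

	pthread_mutex_lock(&s->lock);
	while (!atomic_load(&s->terminate)) {
		while (s->pending) {
			s->pending--;
			printf("processed one unit of work\n");
		}
		/* Like wait_woken(): release the lock and sleep until signalled. */
		pthread_cond_wait(&s->wake, &s->lock);
	}
	pthread_mutex_unlock(&s->lock);
	printf("session thread exiting\n");
	return NULL;
}

/* Counterpart of cmtp_del_connection(): set the flag, then wake the thread. */
static void session_stop(struct session *s, pthread_t tid)
{
	atomic_store(&s->terminate, 1);
	pthread_mutex_lock(&s->lock);
	pthread_cond_signal(&s->wake);
	pthread_mutex_unlock(&s->lock);
	pthread_join(tid, NULL);
}

int main(void)
{
	struct session s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
		.pending = 3,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, session_thread, &s);
	sleep(1);
	session_stop(&s, tid);
	return 0;
}

Because the wake-up is only signalled while the stopper holds the lock, and the flag is re-checked before every wait, the thread cannot miss the shutdown request; this is the same reasoning the kernel comment gives for relying on wait_woken()'s memory barriers.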
/* CMTP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002-2003 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/export.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/compat.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <net/sock.h> #include <linux/isdn/capilli.h> #include "cmtp.h" static struct bt_sock_list cmtp_sk_list = { .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock) }; static int cmtp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; bt_sock_unlink(&cmtp_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int do_cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) { struct cmtp_connadd_req ca; struct cmtp_conndel_req cd; struct cmtp_connlist_req cl; struct cmtp_conninfo ci; struct socket *nsock; int err; BT_DBG("cmd %x arg %p", cmd, argp); switch (cmd) { case CMTPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } err = cmtp_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case CMTPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return cmtp_del_connection(&cd); case CMTPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = cmtp_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case CMTPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = cmtp_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; } return -EINVAL; } static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return do_cmtp_sock_ioctl(sock, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); if (cmd == CMTPGETCONNLIST) { struct cmtp_connlist_req cl; u32 __user *p = argp; u32 uci; int err; if (get_user(cl.cnum, p) || get_user(uci, p + 1)) return -EFAULT; cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return 
-EINVAL; err = cmtp_get_connlist(&cl); if (!err && put_user(cl.cnum, p)) err = -EFAULT; return err; } return do_cmtp_sock_ioctl(sock, cmd, argp); } #endif static const struct proto_ops cmtp_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = cmtp_sock_release, .ioctl = cmtp_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = cmtp_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto cmtp_proto = { .name = "CMTP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &cmtp_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; bt_sock_link(&cmtp_sk_list, sk); return 0; } static const struct net_proto_family cmtp_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = cmtp_sock_create }; int cmtp_init_sockets(void) { int err; err = proto_register(&cmtp_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); if (err < 0) { BT_ERR("Can't register CMTP socket"); goto error; } err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create CMTP proc file"); bt_sock_unregister(BTPROTO_HIDP); goto error; } BT_INFO("CMTP socket layer initialized"); return 0; error: proto_unregister(&cmtp_proto); return err; } void cmtp_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "cmtp"); bt_sock_unregister(BTPROTO_CMTP); proto_unregister(&cmtp_proto); }
linux-master
net/bluetooth/cmtp/sock.c
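cmtp_sock_compat_ioctl() exists because a 32-bit process lays out an ioctl argument that embeds a user-space pointer differently from a 64-bit kernel: the pointer field is 4 bytes instead of 8, so the kernel reads two u32 values and widens the second with compat_ptr() before reusing the common handler. The sketch below only demonstrates that layout mismatch in user space; the two structs are hypothetical stand-ins, not the real cmtp_connlist_req definition.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical request as a 64-bit process would pass it:
 * a count followed by a pointer to a result array. */
struct req64 {
	uint32_t cnum;
	void *ci;		/* 8 bytes on LP64, typically at offset 8 */
};

/* The same request as a 32-bit (compat) process lays it out:
 * the pointer degenerates to a 4-byte integer at offset 4. */
struct req32 {
	uint32_t cnum;
	uint32_t ci;		/* user pointer, must be widened by the kernel */
};

int main(void)
{
	printf("64-bit layout: size=%zu, ci at offset %zu\n",
	       sizeof(struct req64), offsetof(struct req64, ci));
	printf("32-bit layout: size=%zu, ci at offset %zu\n",
	       sizeof(struct req32), offsetof(struct req32, ci));

	/* This mismatch is why the compat handler cannot simply
	 * copy_from_user() into the native struct: it fetches cnum and the
	 * 32-bit pointer separately, converts the pointer (compat_ptr() in
	 * the kernel), and only then calls the shared ioctl path. */
	return 0;
}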
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <[email protected]> Copyright (C) 2002 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * Bluetooth RFCOMM core. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/kthread.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> #include <trace/events/sock.h> #define VERSION "1.11" static bool disable_cfc; static bool l2cap_ertm; static int channel_mtu = -1; static struct task_struct *rfcomm_thread; static DEFINE_MUTEX(rfcomm_mutex); #define rfcomm_lock() mutex_lock(&rfcomm_mutex) #define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) static LIST_HEAD(session_list); static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci); static int rfcomm_queue_disc(struct rfcomm_dlc *d); static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type); static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d); static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig); static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len); static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits); static void rfcomm_make_uih(struct sk_buff *skb, u8 addr); static void rfcomm_process_connect(struct rfcomm_session *s); static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, u8 sec_level, int *err); static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); /* ---- RFCOMM frame parsing macros ---- */ #define __get_dlci(b) ((b & 0xfc) >> 2) #define __get_type(b) ((b & 0xef)) #define __test_ea(b) ((b & 0x01)) #define __test_cr(b) (!!(b & 0x02)) #define __test_pf(b) (!!(b & 0x10)) #define __session_dir(s) ((s)->initiator ? 
0x00 : 0x01) #define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01) #define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) #define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir) #define __srv_channel(dlci) (dlci >> 1) #define __len8(len) (((len) << 1) | 1) #define __len16(len) ((len) << 1) /* MCC macros */ #define __mcc_type(cr, type) (((type << 2) | (cr << 1) | 0x01)) #define __get_mcc_type(b) ((b & 0xfc) >> 2) #define __get_mcc_len(b) ((b & 0xfe) >> 1) /* RPN macros */ #define __rpn_line_settings(data, stop, parity) ((data & 0x3) | ((stop & 0x1) << 2) | ((parity & 0x7) << 3)) #define __get_rpn_data_bits(line) ((line) & 0x3) #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) #define __get_rpn_parity(line) (((line) >> 3) & 0x7) static DECLARE_WAIT_QUEUE_HEAD(rfcomm_wq); static void rfcomm_schedule(void) { wake_up_all(&rfcomm_wq); } /* ---- RFCOMM FCS computation ---- */ /* reversed, 8-bit, poly=0x07 */ static unsigned char rfcomm_crc_table[256] = { 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, 0x24, 0xb5, 0xc7, 0x56, 0x23, 0xb2, 0xc0, 0x51, 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, 0x48, 0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf }; /* CRC on 2 bytes */ #define __crc(data) (rfcomm_crc_table[rfcomm_crc_table[0xff ^ data[0]] ^ data[1]]) /* FCS on 2 bytes */ static inline u8 __fcs(u8 *data) { return 0xff - __crc(data); } /* FCS on 3 bytes */ static inline u8 __fcs2(u8 *data) { return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]]; } /* Check FCS */ static inline int __check_fcs(u8 *data, int type, u8 fcs) { u8 f = __crc(data); if (type != RFCOMM_UIH) f = rfcomm_crc_table[f ^ data[2]]; return rfcomm_crc_table[f ^ fcs] != 0xcf; } /* ---- L2CAP callbacks ---- */ static void rfcomm_l2state_change(struct sock *sk) { BT_DBG("%p state %d", sk, sk->sk_state); rfcomm_schedule(); } static void rfcomm_l2data_ready(struct sock *sk) { trace_sk_data_ready(sk); BT_DBG("%p", sk); rfcomm_schedule(); } static int rfcomm_l2sock_create(struct socket **sock) { int err; BT_DBG(""); err = sock_create_kern(&init_net, PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock); if (!err) { struct sock *sk = (*sock)->sk; sk->sk_data_ready = rfcomm_l2data_ready; sk->sk_state_change = 
rfcomm_l2state_change; } return err; } static int rfcomm_check_security(struct rfcomm_dlc *d) { struct sock *sk = d->session->sock->sk; struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; __u8 auth_type; switch (d->sec_level) { case BT_SECURITY_HIGH: case BT_SECURITY_FIPS: auth_type = HCI_AT_GENERAL_BONDING_MITM; break; case BT_SECURITY_MEDIUM: auth_type = HCI_AT_GENERAL_BONDING; break; default: auth_type = HCI_AT_NO_BONDING; break; } return hci_conn_security(conn->hcon, d->sec_level, auth_type, d->out); } static void rfcomm_session_timeout(struct timer_list *t) { struct rfcomm_session *s = from_timer(s, t, timer); BT_DBG("session %p state %ld", s, s->state); set_bit(RFCOMM_TIMED_OUT, &s->flags); rfcomm_schedule(); } static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) { BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); mod_timer(&s->timer, jiffies + timeout); } static void rfcomm_session_clear_timer(struct rfcomm_session *s) { BT_DBG("session %p state %ld", s, s->state); del_timer_sync(&s->timer); } /* ---- RFCOMM DLCs ---- */ static void rfcomm_dlc_timeout(struct timer_list *t) { struct rfcomm_dlc *d = from_timer(d, t, timer); BT_DBG("dlc %p state %ld", d, d->state); set_bit(RFCOMM_TIMED_OUT, &d->flags); rfcomm_dlc_put(d); rfcomm_schedule(); } static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout) { BT_DBG("dlc %p state %ld timeout %ld", d, d->state, timeout); if (!mod_timer(&d->timer, jiffies + timeout)) rfcomm_dlc_hold(d); } static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); if (del_timer(&d->timer)) rfcomm_dlc_put(d); } static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) { BT_DBG("%p", d); d->state = BT_OPEN; d->flags = 0; d->mscex = 0; d->sec_level = BT_SECURITY_LOW; d->mtu = RFCOMM_DEFAULT_MTU; d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV; d->cfc = RFCOMM_CFC_DISABLED; d->rx_credits = RFCOMM_DEFAULT_CREDITS; } struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) { struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio); if (!d) return NULL; timer_setup(&d->timer, rfcomm_dlc_timeout, 0); skb_queue_head_init(&d->tx_queue); mutex_init(&d->lock); refcount_set(&d->refcnt, 1); rfcomm_dlc_clear_state(d); BT_DBG("%p", d); return d; } void rfcomm_dlc_free(struct rfcomm_dlc *d) { BT_DBG("%p", d); skb_queue_purge(&d->tx_queue); kfree(d); } static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) { BT_DBG("dlc %p session %p", d, s); rfcomm_session_clear_timer(s); rfcomm_dlc_hold(d); list_add(&d->list, &s->dlcs); d->session = s; } static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) { struct rfcomm_session *s = d->session; BT_DBG("dlc %p refcnt %d session %p", d, refcount_read(&d->refcnt), s); list_del(&d->list); d->session = NULL; rfcomm_dlc_put(d); if (list_empty(&s->dlcs)) rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); } static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) { struct rfcomm_dlc *d; list_for_each_entry(d, &s->dlcs, list) if (d->dlci == dlci) return d; return NULL; } static int rfcomm_check_channel(u8 channel) { return channel < 1 || channel > 30; } static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) { struct rfcomm_session *s; int err = 0; u8 dlci; BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d", d, d->state, src, dst, channel); if (rfcomm_check_channel(channel)) return -EINVAL; if (d->state != BT_OPEN && d->state != BT_CLOSED) return 0; s = rfcomm_session_get(src, dst); if (!s) { s 
= rfcomm_session_create(src, dst, d->sec_level, &err); if (!s) return err; } dlci = __dlci(__session_dir(s), channel); /* Check if DLCI already exists */ if (rfcomm_dlc_get(s, dlci)) return -EBUSY; rfcomm_dlc_clear_state(d); d->dlci = dlci; d->addr = __addr(s->initiator, dlci); d->priority = 7; d->state = BT_CONFIG; rfcomm_dlc_link(s, d); d->out = 1; d->mtu = s->mtu; d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc; if (s->state == BT_CONNECTED) { if (rfcomm_check_security(d)) rfcomm_send_pn(s, 1, d); else set_bit(RFCOMM_AUTH_PENDING, &d->flags); } rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); return 0; } int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) { int r; rfcomm_lock(); r = __rfcomm_dlc_open(d, src, dst, channel); rfcomm_unlock(); return r; } static void __rfcomm_dlc_disconn(struct rfcomm_dlc *d) { struct rfcomm_session *s = d->session; d->state = BT_DISCONN; if (skb_queue_empty(&d->tx_queue)) { rfcomm_send_disc(s, d->dlci); rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT); } else { rfcomm_queue_disc(d); rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2); } } static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) { struct rfcomm_session *s = d->session; if (!s) return 0; BT_DBG("dlc %p state %ld dlci %d err %d session %p", d, d->state, d->dlci, err, s); switch (d->state) { case BT_CONNECT: case BT_CONFIG: case BT_OPEN: case BT_CONNECT2: if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { set_bit(RFCOMM_AUTH_REJECT, &d->flags); rfcomm_schedule(); return 0; } } switch (d->state) { case BT_CONNECT: case BT_CONNECTED: __rfcomm_dlc_disconn(d); break; case BT_CONFIG: if (s->state != BT_BOUND) { __rfcomm_dlc_disconn(d); break; } /* if closing a dlc in a session that hasn't been started, * just close and unlink the dlc */ fallthrough; default: rfcomm_dlc_clear_timer(d); rfcomm_dlc_lock(d); d->state = BT_CLOSED; d->state_change(d, err); rfcomm_dlc_unlock(d); skb_queue_purge(&d->tx_queue); rfcomm_dlc_unlink(d); } return 0; } int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) { int r = 0; struct rfcomm_dlc *d_list; struct rfcomm_session *s, *s_list; BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err); rfcomm_lock(); s = d->session; if (!s) goto no_session; /* after waiting on the mutex check the session still exists * then check the dlc still exists */ list_for_each_entry(s_list, &session_list, list) { if (s_list == s) { list_for_each_entry(d_list, &s->dlcs, list) { if (d_list == d) { r = __rfcomm_dlc_close(d, err); break; } } break; } } no_session: rfcomm_unlock(); return r; } struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) { struct rfcomm_session *s; struct rfcomm_dlc *dlc = NULL; u8 dlci; if (rfcomm_check_channel(channel)) return ERR_PTR(-EINVAL); rfcomm_lock(); s = rfcomm_session_get(src, dst); if (s) { dlci = __dlci(__session_dir(s), channel); dlc = rfcomm_dlc_get(s, dlci); } rfcomm_unlock(); return dlc; } static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag) { int len = frag->len; BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); if (len > d->mtu) return -EINVAL; rfcomm_make_uih(frag, d->addr); __skb_queue_tail(&d->tx_queue, frag); return len; } int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) { unsigned long flags; struct sk_buff *frag, *next; int len; if (d->state != BT_CONNECTED) return -ENOTCONN; frag = skb_shinfo(skb)->frag_list; skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically. 
*/ spin_lock_irqsave(&d->tx_queue.lock, flags); len = rfcomm_dlc_send_frag(d, skb); if (len < 0 || !frag) goto unlock; for (; frag; frag = next) { int ret; next = frag->next; ret = rfcomm_dlc_send_frag(d, frag); if (ret < 0) { dev_kfree_skb_irq(frag); goto unlock; } len += ret; } unlock: spin_unlock_irqrestore(&d->tx_queue.lock, flags); if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) rfcomm_schedule(); return len; } void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb) { int len = skb->len; BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); rfcomm_make_uih(skb, d->addr); skb_queue_tail(&d->tx_queue, skb); if (d->state == BT_CONNECTED && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) rfcomm_schedule(); } void __rfcomm_dlc_throttle(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); if (!d->cfc) { d->v24_sig |= RFCOMM_V24_FC; set_bit(RFCOMM_MSC_PENDING, &d->flags); } rfcomm_schedule(); } void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) { BT_DBG("dlc %p state %ld", d, d->state); if (!d->cfc) { d->v24_sig &= ~RFCOMM_V24_FC; set_bit(RFCOMM_MSC_PENDING, &d->flags); } rfcomm_schedule(); } /* Set/get modem status functions use _local_ status i.e. what we report to the other side. Remote status is provided by dlc->modem_status() callback. */ int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig) { BT_DBG("dlc %p state %ld v24_sig 0x%x", d, d->state, v24_sig); if (test_bit(RFCOMM_RX_THROTTLED, &d->flags)) v24_sig |= RFCOMM_V24_FC; else v24_sig &= ~RFCOMM_V24_FC; d->v24_sig = v24_sig; if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags)) rfcomm_schedule(); return 0; } int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig) { BT_DBG("dlc %p state %ld v24_sig 0x%x", d, d->state, d->v24_sig); *v24_sig = d->v24_sig; return 0; } /* ---- RFCOMM sessions ---- */ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) { struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) return NULL; BT_DBG("session %p sock %p", s, sock); timer_setup(&s->timer, rfcomm_session_timeout, 0); INIT_LIST_HEAD(&s->dlcs); s->state = state; s->sock = sock; s->mtu = RFCOMM_DEFAULT_MTU; s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN; /* Do not increment module usage count for listening sessions. * Otherwise we won't be able to unload the module. 
*/ if (state != BT_LISTEN) if (!try_module_get(THIS_MODULE)) { kfree(s); return NULL; } list_add(&s->list, &session_list); return s; } static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s) { int state = s->state; BT_DBG("session %p state %ld", s, s->state); list_del(&s->list); rfcomm_session_clear_timer(s); sock_release(s->sock); kfree(s); if (state != BT_LISTEN) module_put(THIS_MODULE); return NULL; } static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) { struct rfcomm_session *s, *n; struct l2cap_chan *chan; list_for_each_entry_safe(s, n, &session_list, list) { chan = l2cap_pi(s->sock->sk)->chan; if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) && !bacmp(&chan->dst, dst)) return s; } return NULL; } static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s, int err) { struct rfcomm_dlc *d, *n; s->state = BT_CLOSED; BT_DBG("session %p state %ld err %d", s, s->state, err); /* Close all dlcs */ list_for_each_entry_safe(d, n, &s->dlcs, list) { d->state = BT_CLOSED; __rfcomm_dlc_close(d, err); } rfcomm_session_clear_timer(s); return rfcomm_session_del(s); } static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, u8 sec_level, int *err) { struct rfcomm_session *s = NULL; struct sockaddr_l2 addr; struct socket *sock; struct sock *sk; BT_DBG("%pMR -> %pMR", src, dst); *err = rfcomm_l2sock_create(&sock); if (*err < 0) return NULL; bacpy(&addr.l2_bdaddr, src); addr.l2_family = AF_BLUETOOTH; addr.l2_psm = 0; addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); if (*err < 0) goto failed; /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); /* Set MTU to 0 so L2CAP can auto select the MTU */ l2cap_pi(sk)->chan->imtu = 0; l2cap_pi(sk)->chan->sec_level = sec_level; if (l2cap_ertm) l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM; release_sock(sk); s = rfcomm_session_add(sock, BT_BOUND); if (!s) { *err = -ENOMEM; goto failed; } s->initiator = 1; bacpy(&addr.l2_bdaddr, dst); addr.l2_family = AF_BLUETOOTH; addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); if (*err == 0 || *err == -EINPROGRESS) return s; return rfcomm_session_del(s); failed: sock_release(sock); return NULL; } void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst) { struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan; if (src) bacpy(src, &chan->src); if (dst) bacpy(dst, &chan->dst); } /* ---- RFCOMM frame sending ---- */ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) { struct kvec iv = { data, len }; struct msghdr msg; BT_DBG("session %p len %d", s, len); memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(s->sock, &msg, &iv, 1, len); } static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd) { BT_DBG("%p cmd %u", s, cmd->ctrl); return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd)); } static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) { struct rfcomm_cmd cmd; BT_DBG("%p dlci %d", s, dlci); cmd.addr = __addr(s->initiator, dlci); cmd.ctrl = __ctrl(RFCOMM_SABM, 1); cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) { struct rfcomm_cmd cmd; BT_DBG("%p dlci %d", s, dlci); cmd.addr = __addr(!s->initiator, dlci); cmd.ctrl = __ctrl(RFCOMM_UA, 1); cmd.len = __len8(0); cmd.fcs = 
__fcs2((u8 *) &cmd); return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) { struct rfcomm_cmd cmd; BT_DBG("%p dlci %d", s, dlci); cmd.addr = __addr(s->initiator, dlci); cmd.ctrl = __ctrl(RFCOMM_DISC, 1); cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); return rfcomm_send_cmd(s, &cmd); } static int rfcomm_queue_disc(struct rfcomm_dlc *d) { struct rfcomm_cmd *cmd; struct sk_buff *skb; BT_DBG("dlc %p dlci %d", d, d->dlci); skb = alloc_skb(sizeof(*cmd), GFP_KERNEL); if (!skb) return -ENOMEM; cmd = __skb_put(skb, sizeof(*cmd)); cmd->addr = d->addr; cmd->ctrl = __ctrl(RFCOMM_DISC, 1); cmd->len = __len8(0); cmd->fcs = __fcs2((u8 *) cmd); skb_queue_tail(&d->tx_queue, skb); rfcomm_schedule(); return 0; } static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) { struct rfcomm_cmd cmd; BT_DBG("%p dlci %d", s, dlci); cmd.addr = __addr(!s->initiator, dlci); cmd.ctrl = __ctrl(RFCOMM_DM, 1); cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d type %d", s, cr, type); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc) + 1); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(0, RFCOMM_NSC); mcc->len = __len8(1); /* Type that we didn't like */ *ptr = __mcc_type(cr, type); ptr++; *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; struct rfcomm_pn *pn; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d dlci %d mtu %d", s, cr, d->dlci, d->mtu); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc) + sizeof(*pn)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_PN); mcc->len = __len8(sizeof(*pn)); pn = (void *) ptr; ptr += sizeof(*pn); pn->dlci = d->dlci; pn->priority = d->priority; pn->ack_timer = 0; pn->max_retrans = 0; if (s->cfc) { pn->flow_ctrl = cr ? 
0xf0 : 0xe0; pn->credits = RFCOMM_DEFAULT_CREDITS; } else { pn->flow_ctrl = 0; pn->credits = 0; } if (cr && channel_mtu >= 0) pn->mtu = cpu_to_le16(channel_mtu); else pn->mtu = cpu_to_le16(d->mtu); *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, u8 bit_rate, u8 data_bits, u8 stop_bits, u8 parity, u8 flow_ctrl_settings, u8 xon_char, u8 xoff_char, u16 param_mask) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; struct rfcomm_rpn *rpn; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x" " flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x", s, cr, dlci, bit_rate, data_bits, stop_bits, parity, flow_ctrl_settings, xon_char, xoff_char, param_mask); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc) + sizeof(*rpn)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_RPN); mcc->len = __len8(sizeof(*rpn)); rpn = (void *) ptr; ptr += sizeof(*rpn); rpn->dlci = __addr(1, dlci); rpn->bit_rate = bit_rate; rpn->line_settings = __rpn_line_settings(data_bits, stop_bits, parity); rpn->flow_ctrl = flow_ctrl_settings; rpn->xon_char = xon_char; rpn->xoff_char = xoff_char; rpn->param_mask = cpu_to_le16(param_mask); *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; struct rfcomm_rls *rls; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d status 0x%x", s, cr, status); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc) + sizeof(*rls)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_RLS); mcc->len = __len8(sizeof(*rls)); rls = (void *) ptr; ptr += sizeof(*rls); rls->dlci = __addr(1, dlci); rls->status = status; *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; struct rfcomm_msc *msc; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d v24 0x%x", s, cr, v24_sig); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc) + sizeof(*msc)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_MSC); mcc->len = __len8(sizeof(*msc)); msc = (void *) ptr; ptr += sizeof(*msc); msc->dlci = __addr(1, dlci); msc->v24_sig = v24_sig | 0x01; *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d", s, cr); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = __len8(sizeof(*mcc)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_FCOFF); mcc->len = __len8(0); *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_fcon(struct rfcomm_session *s, int cr) { struct rfcomm_hdr *hdr; struct rfcomm_mcc *mcc; u8 buf[16], *ptr = buf; BT_DBG("%p cr %d", s, cr); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = __addr(s->initiator, 0); hdr->ctrl = __ctrl(RFCOMM_UIH, 0); hdr->len = 
__len8(sizeof(*mcc)); mcc = (void *) ptr; ptr += sizeof(*mcc); mcc->type = __mcc_type(cr, RFCOMM_FCON); mcc->len = __len8(0); *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len) { struct socket *sock = s->sock; struct kvec iv[3]; struct msghdr msg; unsigned char hdr[5], crc[1]; if (len > 125) return -EINVAL; BT_DBG("%p cr %d", s, cr); hdr[0] = __addr(s->initiator, 0); hdr[1] = __ctrl(RFCOMM_UIH, 0); hdr[2] = 0x01 | ((len + 2) << 1); hdr[3] = 0x01 | ((cr & 0x01) << 1) | (RFCOMM_TEST << 2); hdr[4] = 0x01 | (len << 1); crc[0] = __fcs(hdr); iv[0].iov_base = hdr; iv[0].iov_len = 5; iv[1].iov_base = pattern; iv[1].iov_len = len; iv[2].iov_base = crc; iv[2].iov_len = 1; memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, iv, 3, 6 + len); } static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits) { struct rfcomm_hdr *hdr; u8 buf[16], *ptr = buf; BT_DBG("%p addr %d credits %d", s, addr, credits); hdr = (void *) ptr; ptr += sizeof(*hdr); hdr->addr = addr; hdr->ctrl = __ctrl(RFCOMM_UIH, 1); hdr->len = __len8(0); *ptr = credits; ptr++; *ptr = __fcs(buf); ptr++; return rfcomm_send_frame(s, buf, ptr - buf); } static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) { struct rfcomm_hdr *hdr; int len = skb->len; u8 *crc; if (len > 127) { hdr = skb_push(skb, 4); put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len); } else { hdr = skb_push(skb, 3); hdr->len = __len8(len); } hdr->addr = addr; hdr->ctrl = __ctrl(RFCOMM_UIH, 0); crc = skb_put(skb, 1); *crc = __fcs((void *) hdr); } /* ---- RFCOMM frame reception ---- */ static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) { BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); if (dlci) { /* Data channel */ struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); if (!d) { rfcomm_send_dm(s, dlci); return s; } switch (d->state) { case BT_CONNECT: rfcomm_dlc_clear_timer(d); rfcomm_dlc_lock(d); d->state = BT_CONNECTED; d->state_change(d, 0); rfcomm_dlc_unlock(d); rfcomm_send_msc(s, 1, dlci, d->v24_sig); break; case BT_DISCONN: d->state = BT_CLOSED; __rfcomm_dlc_close(d, 0); if (list_empty(&s->dlcs)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); rfcomm_session_clear_timer(s); } break; } } else { /* Control channel */ switch (s->state) { case BT_CONNECT: s->state = BT_CONNECTED; rfcomm_process_connect(s); break; case BT_DISCONN: s = rfcomm_session_close(s, ECONNRESET); break; } } return s; } static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) { int err = 0; BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); if (dlci) { /* Data DLC */ struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); if (d) { if (d->state == BT_CONNECT || d->state == BT_CONFIG) err = ECONNREFUSED; else err = ECONNRESET; d->state = BT_CLOSED; __rfcomm_dlc_close(d, err); } } else { if (s->state == BT_CONNECT) err = ECONNREFUSED; else err = ECONNRESET; s = rfcomm_session_close(s, err); } return s; } static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci) { int err = 0; BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); if (dlci) { struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); if (d) { rfcomm_send_ua(s, dlci); if (d->state == BT_CONNECT || d->state == BT_CONFIG) err = ECONNREFUSED; else err = ECONNRESET; d->state = BT_CLOSED; __rfcomm_dlc_close(d, err); } else rfcomm_send_dm(s, dlci); } else { rfcomm_send_ua(s, 0); if (s->state == BT_CONNECT) err = ECONNREFUSED; 
else err = ECONNRESET; s = rfcomm_session_close(s, err); } return s; } void rfcomm_dlc_accept(struct rfcomm_dlc *d) { struct sock *sk = d->session->sock->sk; struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; BT_DBG("dlc %p", d); rfcomm_send_ua(d->session, d->dlci); rfcomm_dlc_clear_timer(d); rfcomm_dlc_lock(d); d->state = BT_CONNECTED; d->state_change(d, 0); rfcomm_dlc_unlock(d); if (d->role_switch) hci_conn_switch_role(conn->hcon, 0x00); rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); } static void rfcomm_check_accept(struct rfcomm_dlc *d) { if (rfcomm_check_security(d)) { if (d->defer_setup) { set_bit(RFCOMM_DEFER_SETUP, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); rfcomm_dlc_lock(d); d->state = BT_CONNECT2; d->state_change(d, 0); rfcomm_dlc_unlock(d); } else rfcomm_dlc_accept(d); } else { set_bit(RFCOMM_AUTH_PENDING, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); } } static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) { struct rfcomm_dlc *d; u8 channel; BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); if (!dlci) { rfcomm_send_ua(s, 0); if (s->state == BT_OPEN) { s->state = BT_CONNECTED; rfcomm_process_connect(s); } return 0; } /* Check if DLC exists */ d = rfcomm_dlc_get(s, dlci); if (d) { if (d->state == BT_OPEN) { /* DLC was previously opened by PN request */ rfcomm_check_accept(d); } return 0; } /* Notify socket layer about incoming connection */ channel = __srv_channel(dlci); if (rfcomm_connect_ind(s, channel, &d)) { d->dlci = dlci; d->addr = __addr(s->initiator, dlci); rfcomm_dlc_link(s, d); rfcomm_check_accept(d); } else { rfcomm_send_dm(s, dlci); } return 0; } static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) { struct rfcomm_session *s = d->session; BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) || pn->flow_ctrl == 0xe0) { d->cfc = RFCOMM_CFC_ENABLED; d->tx_credits = pn->credits; } else { d->cfc = RFCOMM_CFC_DISABLED; set_bit(RFCOMM_TX_THROTTLED, &d->flags); } if (s->cfc == RFCOMM_CFC_UNKNOWN) s->cfc = d->cfc; d->priority = pn->priority; d->mtu = __le16_to_cpu(pn->mtu); if (cr && d->mtu > s->mtu) d->mtu = s->mtu; return 0; } static int rfcomm_recv_pn(struct rfcomm_session *s, int cr, struct sk_buff *skb) { struct rfcomm_pn *pn = (void *) skb->data; struct rfcomm_dlc *d; u8 dlci = pn->dlci; BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); if (!dlci) return 0; d = rfcomm_dlc_get(s, dlci); if (d) { if (cr) { /* PN request */ rfcomm_apply_pn(d, cr, pn); rfcomm_send_pn(s, 0, d); } else { /* PN response */ switch (d->state) { case BT_CONFIG: rfcomm_apply_pn(d, cr, pn); d->state = BT_CONNECT; rfcomm_send_sabm(s, d->dlci); break; } } } else { u8 channel = __srv_channel(dlci); if (!cr) return 0; /* PN request for non existing DLC. * Assume incoming connection. 
*/ if (rfcomm_connect_ind(s, channel, &d)) { d->dlci = dlci; d->addr = __addr(s->initiator, dlci); rfcomm_dlc_link(s, d); rfcomm_apply_pn(d, cr, pn); d->state = BT_OPEN; rfcomm_send_pn(s, 0, d); } else { rfcomm_send_dm(s, dlci); } } return 0; } static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_buff *skb) { struct rfcomm_rpn *rpn = (void *) skb->data; u8 dlci = __get_dlci(rpn->dlci); u8 bit_rate = 0; u8 data_bits = 0; u8 stop_bits = 0; u8 parity = 0; u8 flow_ctrl = 0; u8 xon_char = 0; u8 xoff_char = 0; u16 rpn_mask = RFCOMM_RPN_PM_ALL; BT_DBG("dlci %d cr %d len 0x%x bitr 0x%x line 0x%x flow 0x%x xonc 0x%x xoffc 0x%x pm 0x%x", dlci, cr, len, rpn->bit_rate, rpn->line_settings, rpn->flow_ctrl, rpn->xon_char, rpn->xoff_char, rpn->param_mask); if (!cr) return 0; if (len == 1) { /* This is a request, return default (according to ETSI TS 07.10) settings */ bit_rate = RFCOMM_RPN_BR_9600; data_bits = RFCOMM_RPN_DATA_8; stop_bits = RFCOMM_RPN_STOP_1; parity = RFCOMM_RPN_PARITY_NONE; flow_ctrl = RFCOMM_RPN_FLOW_NONE; xon_char = RFCOMM_RPN_XON_CHAR; xoff_char = RFCOMM_RPN_XOFF_CHAR; goto rpn_out; } /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit, * no parity, no flow control lines, normal XON/XOFF chars */ if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) { bit_rate = rpn->bit_rate; if (bit_rate > RFCOMM_RPN_BR_230400) { BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); bit_rate = RFCOMM_RPN_BR_9600; rpn_mask ^= RFCOMM_RPN_PM_BITRATE; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_DATA)) { data_bits = __get_rpn_data_bits(rpn->line_settings); if (data_bits != RFCOMM_RPN_DATA_8) { BT_DBG("RPN data bits mismatch 0x%x", data_bits); data_bits = RFCOMM_RPN_DATA_8; rpn_mask ^= RFCOMM_RPN_PM_DATA; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_STOP)) { stop_bits = __get_rpn_stop_bits(rpn->line_settings); if (stop_bits != RFCOMM_RPN_STOP_1) { BT_DBG("RPN stop bits mismatch 0x%x", stop_bits); stop_bits = RFCOMM_RPN_STOP_1; rpn_mask ^= RFCOMM_RPN_PM_STOP; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_PARITY)) { parity = __get_rpn_parity(rpn->line_settings); if (parity != RFCOMM_RPN_PARITY_NONE) { BT_DBG("RPN parity mismatch 0x%x", parity); parity = RFCOMM_RPN_PARITY_NONE; rpn_mask ^= RFCOMM_RPN_PM_PARITY; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_FLOW)) { flow_ctrl = rpn->flow_ctrl; if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl); flow_ctrl = RFCOMM_RPN_FLOW_NONE; rpn_mask ^= RFCOMM_RPN_PM_FLOW; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XON)) { xon_char = rpn->xon_char; if (xon_char != RFCOMM_RPN_XON_CHAR) { BT_DBG("RPN XON char mismatch 0x%x", xon_char); xon_char = RFCOMM_RPN_XON_CHAR; rpn_mask ^= RFCOMM_RPN_PM_XON; } } if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XOFF)) { xoff_char = rpn->xoff_char; if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char); xoff_char = RFCOMM_RPN_XOFF_CHAR; rpn_mask ^= RFCOMM_RPN_PM_XOFF; } } rpn_out: rfcomm_send_rpn(s, 0, dlci, bit_rate, data_bits, stop_bits, parity, flow_ctrl, xon_char, xoff_char, rpn_mask); return 0; } static int rfcomm_recv_rls(struct rfcomm_session *s, int cr, struct sk_buff *skb) { struct rfcomm_rls *rls = (void *) skb->data; u8 dlci = __get_dlci(rls->dlci); BT_DBG("dlci %d cr %d status 0x%x", dlci, cr, rls->status); if (!cr) return 0; /* We should probably do something with this information here. 
But * for now it's sufficient just to reply -- Bluetooth 1.1 says it's * mandatory to recognise and respond to RLS */ rfcomm_send_rls(s, 0, dlci, rls->status); return 0; } static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb) { struct rfcomm_msc *msc = (void *) skb->data; struct rfcomm_dlc *d; u8 dlci = __get_dlci(msc->dlci); BT_DBG("dlci %d cr %d v24 0x%x", dlci, cr, msc->v24_sig); d = rfcomm_dlc_get(s, dlci); if (!d) return 0; if (cr) { if (msc->v24_sig & RFCOMM_V24_FC && !d->cfc) set_bit(RFCOMM_TX_THROTTLED, &d->flags); else clear_bit(RFCOMM_TX_THROTTLED, &d->flags); rfcomm_dlc_lock(d); d->remote_v24_sig = msc->v24_sig; if (d->modem_status) d->modem_status(d, msc->v24_sig); rfcomm_dlc_unlock(d); rfcomm_send_msc(s, 0, dlci, msc->v24_sig); d->mscex |= RFCOMM_MSCEX_RX; } else d->mscex |= RFCOMM_MSCEX_TX; return 0; } static int rfcomm_recv_mcc(struct rfcomm_session *s, struct sk_buff *skb) { struct rfcomm_mcc *mcc = (void *) skb->data; u8 type, cr, len; cr = __test_cr(mcc->type); type = __get_mcc_type(mcc->type); len = __get_mcc_len(mcc->len); BT_DBG("%p type 0x%x cr %d", s, type, cr); skb_pull(skb, 2); switch (type) { case RFCOMM_PN: rfcomm_recv_pn(s, cr, skb); break; case RFCOMM_RPN: rfcomm_recv_rpn(s, cr, len, skb); break; case RFCOMM_RLS: rfcomm_recv_rls(s, cr, skb); break; case RFCOMM_MSC: rfcomm_recv_msc(s, cr, skb); break; case RFCOMM_FCOFF: if (cr) { set_bit(RFCOMM_TX_THROTTLED, &s->flags); rfcomm_send_fcoff(s, 0); } break; case RFCOMM_FCON: if (cr) { clear_bit(RFCOMM_TX_THROTTLED, &s->flags); rfcomm_send_fcon(s, 0); } break; case RFCOMM_TEST: if (cr) rfcomm_send_test(s, 0, skb->data, skb->len); break; case RFCOMM_NSC: break; default: BT_ERR("Unknown control type 0x%02x", type); rfcomm_send_nsc(s, cr, type); break; } return 0; } static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk_buff *skb) { struct rfcomm_dlc *d; BT_DBG("session %p state %ld dlci %d pf %d", s, s->state, dlci, pf); d = rfcomm_dlc_get(s, dlci); if (!d) { rfcomm_send_dm(s, dlci); goto drop; } if (pf && d->cfc) { u8 credits = *(u8 *) skb->data; skb_pull(skb, 1); d->tx_credits += credits; if (d->tx_credits) clear_bit(RFCOMM_TX_THROTTLED, &d->flags); } if (skb->len && d->state == BT_CONNECTED) { rfcomm_dlc_lock(d); d->rx_credits--; d->data_ready(d, skb); rfcomm_dlc_unlock(d); return 0; } drop: kfree_skb(skb); return 0; } static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s, struct sk_buff *skb) { struct rfcomm_hdr *hdr = (void *) skb->data; u8 type, dlci, fcs; if (!s) { /* no session, so free socket data */ kfree_skb(skb); return s; } dlci = __get_dlci(hdr->addr); type = __get_type(hdr->ctrl); /* Trim FCS */ skb->len--; skb->tail--; fcs = *(u8 *)skb_tail_pointer(skb); if (__check_fcs(skb->data, type, fcs)) { BT_ERR("bad checksum in packet"); kfree_skb(skb); return s; } if (__test_ea(hdr->len)) skb_pull(skb, 3); else skb_pull(skb, 4); switch (type) { case RFCOMM_SABM: if (__test_pf(hdr->ctrl)) rfcomm_recv_sabm(s, dlci); break; case RFCOMM_DISC: if (__test_pf(hdr->ctrl)) s = rfcomm_recv_disc(s, dlci); break; case RFCOMM_UA: if (__test_pf(hdr->ctrl)) s = rfcomm_recv_ua(s, dlci); break; case RFCOMM_DM: s = rfcomm_recv_dm(s, dlci); break; case RFCOMM_UIH: if (dlci) { rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); return s; } rfcomm_recv_mcc(s, skb); break; default: BT_ERR("Unknown packet type 0x%02x", type); break; } kfree_skb(skb); return s; } /* ---- Connection and data processing ---- */ static void rfcomm_process_connect(struct 
rfcomm_session *s) { struct rfcomm_dlc *d, *n; BT_DBG("session %p state %ld", s, s->state); list_for_each_entry_safe(d, n, &s->dlcs, list) { if (d->state == BT_CONFIG) { d->mtu = s->mtu; if (rfcomm_check_security(d)) { rfcomm_send_pn(s, 1, d); } else { set_bit(RFCOMM_AUTH_PENDING, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); } } } } /* Send data queued for the DLC. * Return number of frames left in the queue. */ static int rfcomm_process_tx(struct rfcomm_dlc *d) { struct sk_buff *skb; int err; BT_DBG("dlc %p state %ld cfc %d rx_credits %d tx_credits %d", d, d->state, d->cfc, d->rx_credits, d->tx_credits); /* Send pending MSC */ if (test_and_clear_bit(RFCOMM_MSC_PENDING, &d->flags)) rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); if (d->cfc) { /* CFC enabled. * Give them some credits */ if (!test_bit(RFCOMM_RX_THROTTLED, &d->flags) && d->rx_credits <= (d->cfc >> 2)) { rfcomm_send_credits(d->session, d->addr, d->cfc - d->rx_credits); d->rx_credits = d->cfc; } } else { /* CFC disabled. * Give ourselves some credits */ d->tx_credits = 5; } if (test_bit(RFCOMM_TX_THROTTLED, &d->flags)) return skb_queue_len(&d->tx_queue); while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) { err = rfcomm_send_frame(d->session, skb->data, skb->len); if (err < 0) { skb_queue_head(&d->tx_queue, skb); break; } kfree_skb(skb); d->tx_credits--; } if (d->cfc && !d->tx_credits) { /* We're out of TX credits. * Set TX_THROTTLED flag to avoid unnesary wakeups by dlc_send. */ set_bit(RFCOMM_TX_THROTTLED, &d->flags); } return skb_queue_len(&d->tx_queue); } static void rfcomm_process_dlcs(struct rfcomm_session *s) { struct rfcomm_dlc *d, *n; BT_DBG("session %p state %ld", s, s->state); list_for_each_entry_safe(d, n, &s->dlcs, list) { if (test_bit(RFCOMM_TIMED_OUT, &d->flags)) { __rfcomm_dlc_close(d, ETIMEDOUT); continue; } if (test_bit(RFCOMM_ENC_DROP, &d->flags)) { __rfcomm_dlc_close(d, ECONNREFUSED); continue; } if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { rfcomm_dlc_clear_timer(d); if (d->out) { rfcomm_send_pn(s, 1, d); rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); } else { if (d->defer_setup) { set_bit(RFCOMM_DEFER_SETUP, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); rfcomm_dlc_lock(d); d->state = BT_CONNECT2; d->state_change(d, 0); rfcomm_dlc_unlock(d); } else rfcomm_dlc_accept(d); } continue; } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { rfcomm_dlc_clear_timer(d); if (!d->out) rfcomm_send_dm(s, d->dlci); else d->state = BT_CLOSED; __rfcomm_dlc_close(d, ECONNREFUSED); continue; } if (test_bit(RFCOMM_SEC_PENDING, &d->flags)) continue; if (test_bit(RFCOMM_TX_THROTTLED, &s->flags)) continue; if ((d->state == BT_CONNECTED || d->state == BT_DISCONN) && d->mscex == RFCOMM_MSCEX_OK) rfcomm_process_tx(d); } } static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) { struct socket *sock = s->sock; struct sock *sk = sock->sk; struct sk_buff *skb; BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue)); /* Get data directly from socket receive queue without copying it. */ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) { s = rfcomm_recv_frame(s, skb); if (!s) break; } else { kfree_skb(skb); } } if (s && (sk->sk_state == BT_CLOSED)) s = rfcomm_session_close(s, sk->sk_err); return s; } static void rfcomm_accept_connection(struct rfcomm_session *s) { struct socket *sock = s->sock, *nsock; int err; /* Fast check for a new connection. * Avoids unnesesary socket allocations. 
*/ if (list_empty(&bt_sk(sock->sk)->accept_q)) return; BT_DBG("session %p", s); err = kernel_accept(sock, &nsock, O_NONBLOCK); if (err < 0) return; /* Set our callbacks */ nsock->sk->sk_data_ready = rfcomm_l2data_ready; nsock->sk->sk_state_change = rfcomm_l2state_change; s = rfcomm_session_add(nsock, BT_OPEN); if (s) { /* We should adjust MTU on incoming sessions. * L2CAP MTU minus UIH header and FCS. */ s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu, l2cap_pi(nsock->sk)->chan->imtu) - 5; rfcomm_schedule(); } else sock_release(nsock); } static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s) { struct sock *sk = s->sock->sk; BT_DBG("%p state %ld", s, s->state); switch (sk->sk_state) { case BT_CONNECTED: s->state = BT_CONNECT; /* We can adjust MTU on outgoing sessions. * L2CAP MTU minus UIH header and FCS. */ s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5; rfcomm_send_sabm(s, 0); break; case BT_CLOSED: s = rfcomm_session_close(s, sk->sk_err); break; } return s; } static void rfcomm_process_sessions(void) { struct rfcomm_session *s, *n; rfcomm_lock(); list_for_each_entry_safe(s, n, &session_list, list) { if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { s->state = BT_DISCONN; rfcomm_send_disc(s, 0); continue; } switch (s->state) { case BT_LISTEN: rfcomm_accept_connection(s); continue; case BT_BOUND: s = rfcomm_check_connection(s); break; default: s = rfcomm_process_rx(s); break; } if (s) rfcomm_process_dlcs(s); } rfcomm_unlock(); } static int rfcomm_add_listener(bdaddr_t *ba) { struct sockaddr_l2 addr; struct socket *sock; struct sock *sk; struct rfcomm_session *s; int err = 0; /* Create socket */ err = rfcomm_l2sock_create(&sock); if (err < 0) { BT_ERR("Create socket failed %d", err); return err; } /* Bind socket */ bacpy(&addr.l2_bdaddr, ba); addr.l2_family = AF_BLUETOOTH; addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); addr.l2_cid = 0; addr.l2_bdaddr_type = BDADDR_BREDR; err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); if (err < 0) { BT_ERR("Bind failed %d", err); goto failed; } /* Set L2CAP options */ sk = sock->sk; lock_sock(sk); /* Set MTU to 0 so L2CAP can auto select the MTU */ l2cap_pi(sk)->chan->imtu = 0; release_sock(sk); /* Start listening on the socket */ err = kernel_listen(sock, 10); if (err) { BT_ERR("Listen failed %d", err); goto failed; } /* Add listening session */ s = rfcomm_session_add(sock, BT_LISTEN); if (!s) { err = -ENOMEM; goto failed; } return 0; failed: sock_release(sock); return err; } static void rfcomm_kill_listener(void) { struct rfcomm_session *s, *n; BT_DBG(""); list_for_each_entry_safe(s, n, &session_list, list) rfcomm_session_del(s); } static int rfcomm_run(void *unused) { DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG(""); set_user_nice(current, -10); rfcomm_add_listener(BDADDR_ANY); add_wait_queue(&rfcomm_wq, &wait); while (!kthread_should_stop()) { /* Process stuff */ rfcomm_process_sessions(); wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&rfcomm_wq, &wait); rfcomm_kill_listener(); return 0; } static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) { struct rfcomm_session *s; struct rfcomm_dlc *d, *n; BT_DBG("conn %p status 0x%02x encrypt 0x%02x", conn, status, encrypt); s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst); if (!s) return; list_for_each_entry_safe(d, n, &s->dlcs, list) { if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { rfcomm_dlc_clear_timer(d); if (status || encrypt == 0x00) { set_bit(RFCOMM_ENC_DROP, 
&d->flags); continue; } } if (d->state == BT_CONNECTED && !status && encrypt == 0x00) { if (d->sec_level == BT_SECURITY_MEDIUM) { set_bit(RFCOMM_SEC_PENDING, &d->flags); rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); continue; } else if (d->sec_level == BT_SECURITY_HIGH || d->sec_level == BT_SECURITY_FIPS) { set_bit(RFCOMM_ENC_DROP, &d->flags); continue; } } if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) continue; if (!status && hci_conn_check_secure(conn, d->sec_level)) set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); else set_bit(RFCOMM_AUTH_REJECT, &d->flags); } rfcomm_schedule(); } static struct hci_cb rfcomm_cb = { .name = "RFCOMM", .security_cfm = rfcomm_security_cfm }; static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) { struct rfcomm_session *s; rfcomm_lock(); list_for_each_entry(s, &session_list, list) { struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan; struct rfcomm_dlc *d; list_for_each_entry(d, &s->dlcs, list) { seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n", &chan->src, &chan->dst, d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); } } rfcomm_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(rfcomm_dlc_debugfs); static struct dentry *rfcomm_dlc_debugfs; /* ---- Initialization ---- */ static int __init rfcomm_init(void) { int err; hci_register_cb(&rfcomm_cb); rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); if (IS_ERR(rfcomm_thread)) { err = PTR_ERR(rfcomm_thread); goto unregister; } err = rfcomm_init_ttys(); if (err < 0) goto stop; err = rfcomm_init_sockets(); if (err < 0) goto cleanup; BT_INFO("RFCOMM ver %s", VERSION); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops); return 0; cleanup: rfcomm_cleanup_ttys(); stop: kthread_stop(rfcomm_thread); unregister: hci_unregister_cb(&rfcomm_cb); return err; } static void __exit rfcomm_exit(void) { debugfs_remove(rfcomm_dlc_debugfs); hci_unregister_cb(&rfcomm_cb); kthread_stop(rfcomm_thread); rfcomm_cleanup_ttys(); rfcomm_cleanup_sockets(); } module_init(rfcomm_init); module_exit(rfcomm_exit); module_param(disable_cfc, bool, 0644); MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); module_param(channel_mtu, int, 0644); MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); module_param(l2cap_ertm, bool, 0644); MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection"); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-3");
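The frame-parsing macros near the top of rfcomm/core.c pack a direction bit and a 5-bit server channel into a DLCI, and the DLCI plus a C/R bit and the always-set EA bit into the address octet. Below is a minimal round-trip check of that packing, written as plain functions so it runs in user space; the function names are local to this sketch, while the bit layout is taken directly from the __dlci/__srv_channel/__addr/__get_dlci macros above.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* DLCI = 5-bit server channel shifted up by one, low bit = direction
 * (mirrors __dlci(dir, chn) and __srv_channel(dlci)). */
static uint8_t make_dlci(uint8_t dir, uint8_t channel)
{
	return (uint8_t)(((channel & 0x1f) << 1) | (dir & 0x01));
}

static uint8_t dlci_channel(uint8_t dlci)
{
	return dlci >> 1;
}

/* Address octet = DLCI in bits 2..7, C/R in bit 1, EA bit always set
 * (mirrors __addr(cr, dlci) and __get_dlci(b)). */
static uint8_t make_addr(uint8_t cr, uint8_t dlci)
{
	return (uint8_t)(((dlci & 0x3f) << 2) | ((cr & 0x01) << 1) | 0x01);
}

static uint8_t addr_dlci(uint8_t addr)
{
	return (addr & 0xfc) >> 2;
}

int main(void)
{
	uint8_t channel, dir;

	for (dir = 0; dir <= 1; dir++) {
		for (channel = 1; channel <= 30; channel++) {
			uint8_t dlci = make_dlci(dir, channel);
			uint8_t addr = make_addr(1, dlci);

			assert(dlci_channel(dlci) == channel);
			assert(addr_dlci(addr) == dlci);
			assert(addr & 0x01);	/* EA bit */
		}
	}
	printf("DLCI/address packing round-trips for channels 1..30\n");
	return 0;
}

Channels 1..30 fit in five bits and the resulting DLCIs fit in six, which is why rfcomm_check_channel() rejects anything outside that range.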
linux-master
net/bluetooth/rfcomm/core.c
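rfcomm_crc_table in the core above is the reflected CRC-8 table for polynomial 0x07 (reflected constant 0xe0), and __fcs() returns 0xff minus the CRC chained over the first two header bytes. The short, self-contained program below regenerates the table at run time and checks a few entries against the literal values in the file; the generator is an assumption about how the table was produced, inferred from the "reversed, 8-bit, poly=0x07" comment.

#include <stdint.h>
#include <stdio.h>

/* Reflected (LSB-first) CRC-8 with polynomial 0x07; the reflected
 * polynomial constant is 0xe0. Each table entry is the CRC of the
 * single byte equal to its index. */
static uint8_t crc8_reflected(uint8_t byte)
{
	uint8_t crc = byte;
	int i;

	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (uint8_t)((crc >> 1) ^ 0xe0) : (uint8_t)(crc >> 1);
	return crc;
}

/* FCS over two header bytes, mirroring __fcs(): 0xff minus the CRC
 * chained through the table (UIH frames use two bytes, others three). */
static uint8_t fcs_two_bytes(const uint8_t *data, const uint8_t *table)
{
	return (uint8_t)(0xff - table[table[0xff ^ data[0]] ^ data[1]]);
}

int main(void)
{
	static uint8_t table[256];
	/* First few entries of rfcomm_crc_table as listed in the file. */
	static const uint8_t expected[] = { 0x00, 0x91, 0xe3, 0x72, 0x07 };
	uint8_t hdr[2] = { 0x03, 0x3f };	/* arbitrary addr/ctrl pair */
	int i;

	for (i = 0; i < 256; i++)
		table[i] = crc8_reflected((uint8_t)i);

	for (i = 0; i < 5; i++)
		printf("table[%d] = 0x%02x (expected 0x%02x)\n",
		       i, table[i], expected[i]);

	printf("fcs over {0x03, 0x3f} = 0x%02x\n", fcs_two_bytes(hdr, table));
	return 0;
}

Running it prints 0x00, 0x91, 0xe3, 0x72, 0x07 for the first five entries, matching the table in the source, which confirms the "reversed poly 0x07" description of the checksum.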
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <[email protected]> Copyright (C) 2002 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM sockets. */ #include <linux/compat.h> #include <linux/export.h> #include <linux/debugfs.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> static const struct proto_ops rfcomm_sock_ops; static struct bt_sock_list rfcomm_sk_list = { .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) }; static void rfcomm_sock_close(struct sock *sk); static void rfcomm_sock_kill(struct sock *sk); /* ---- DLC callbacks ---- * * called under rfcomm_dlc_lock() */ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) { struct sock *sk = d->owner; if (!sk) return; atomic_add(skb->len, &sk->sk_rmem_alloc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) rfcomm_dlc_throttle(d); } static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); lock_sock(sk); if (err) sk->sk_err = err; sk->sk_state = d->state; parent = bt_sk(sk)->parent; if (parent) { if (d->state == BT_CLOSED) { sock_set_flag(sk, SOCK_ZAPPED); bt_accept_unlink(sk); } parent->sk_data_ready(parent); } else { if (d->state == BT_CONNECTED) rfcomm_session_getaddr(d->session, &rfcomm_pi(sk)->src, NULL); sk->sk_state_change(sk); } release_sock(sk); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise * rfcomm_sock_destruct() will dead lock. */ rfcomm_dlc_unlock(d); rfcomm_sock_kill(sk); rfcomm_dlc_lock(d); } } /* ---- Socket functions ---- */ static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src) { struct sock *sk = NULL; sk_for_each(sk, &rfcomm_sk_list.head) { if (rfcomm_pi(sk)->channel != channel) continue; if (bacmp(&rfcomm_pi(sk)->src, src)) continue; if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN) break; } return sk ? sk : NULL; } /* Find socket with channel and source bdaddr. * Returns closest match. */ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { if (state && sk->sk_state != state) continue; if (rfcomm_pi(sk)->channel == channel) { /* Exact match. 
*/ if (!bacmp(&rfcomm_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } } read_unlock(&rfcomm_sk_list.lock); return sk ? sk : sk1; } static void rfcomm_sock_destruct(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p dlc %p", sk, d); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); rfcomm_dlc_lock(d); rfcomm_pi(sk)->dlc = NULL; /* Detach DLC if it's owned by this socket */ if (d->owner == sk) d->owner = NULL; rfcomm_dlc_unlock(d); rfcomm_dlc_put(d); } static void rfcomm_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted dlcs */ while ((sk = bt_accept_dequeue(parent, NULL))) { rfcomm_sock_close(sk); rfcomm_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ static void rfcomm_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); /* Kill poor orphan */ bt_sock_unlink(&rfcomm_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __rfcomm_sock_close(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: rfcomm_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); fallthrough; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Close socket. * Must be called on unlocked socket. */ static void rfcomm_sock_close(struct sock *sk) { lock_sock(sk); __rfcomm_sock_close(sk); release_sock(sk); } static void rfcomm_sock_init(struct sock *sk, struct sock *parent) { struct rfcomm_pinfo *pi = rfcomm_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags); pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; security_sk_clone(parent, sk); } else { pi->dlc->defer_setup = 0; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; } pi->dlc->sec_level = pi->sec_level; pi->dlc->role_switch = pi->role_switch; } static struct proto rfcomm_proto = { .name = "RFCOMM", .owner = THIS_MODULE, .obj_size = sizeof(struct rfcomm_pinfo) }; static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct rfcomm_dlc *d; struct sock *sk; sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); if (!sk) return NULL; d = rfcomm_dlc_alloc(prio); if (!d) { sk_free(sk); return NULL; } d->data_ready = rfcomm_sk_data_ready; d->state_change = rfcomm_sk_state_change; rfcomm_pi(sk)->dlc = d; d->owner = sk; sk->sk_destruct = rfcomm_sock_destruct; sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); return sk; } static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &rfcomm_sock_ops; sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; 
rfcomm_sock_init(sk, NULL); return 0; } static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_rc sa; struct sock *sk = sock->sk; int len, err = 0; if (!addr || addr_len < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&sa, 0, sizeof(sa)); len = min_t(unsigned int, sizeof(sa), addr_len); memcpy(&sa, addr, len); BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } write_lock(&rfcomm_sk_list.lock); if (sa.rc_channel && __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr); rfcomm_pi(sk)->channel = sa.rc_channel; sk->sk_state = BT_BOUND; } write_unlock(&rfcomm_sk_list.lock); done: release_sock(sk); return err; } static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int err = 0; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_rc) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; sock_hold(sk); lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } sk->sk_state = BT_CONNECT; bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ release_sock(sk); err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, sa->rc_channel); lock_sock(sk); if (!err && !sock_flag(sk, SOCK_ZAPPED)) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); sock_put(sk); return err; } static int rfcomm_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } if (!rfcomm_pi(sk)->channel) { bdaddr_t *src = &rfcomm_pi(sk)->src; u8 channel; err = -EINVAL; write_lock(&rfcomm_sk_list.lock); for (channel = 1; channel < 31; channel++) if (!__rfcomm_get_listen_sock_by_addr(channel, src)) { rfcomm_pi(sk)->channel = channel; err = 0; break; } write_unlock(&rfcomm_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } nsk = bt_accept_dequeue(sk, newsock); if (nsk) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); if (peer && sk->sk_state != BT_CONNECTED && sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) return -ENOTCONN; memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst); else bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src); return sizeof(struct sockaddr_rc); } static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; struct sk_buff *skb; int sent; if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) return -ENOTCONN; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; BT_DBG("sock %p, sk %p", sock, sk); lock_sock(sk); sent = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); if (sent) return sent; skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, RFCOMM_SKB_TAIL_RESERVE); if (IS_ERR(skb)) return PTR_ERR(skb); sent = rfcomm_dlc_send(d, skb); if (sent < 0) kfree_skb(skb); return sent; } static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); return 0; } len = bt_sock_stream_recvmsg(sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; } static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case RFCOMM_LM: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt & RFCOMM_LM_FIPS) { err = -EINVAL; break; } if (opt & RFCOMM_LM_AUTH) rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & RFCOMM_LM_ENCRYPT) rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & RFCOMM_LM_SECURE) rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; int err = 0; size_t len; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; 
break; } sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } if (sec.level > BT_SECURITY_HIGH) { err = -EINVAL; break; } rfcomm_pi(sk)->sec_level = sec.level; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sock *l2cap_sk; struct l2cap_conn *conn; struct rfcomm_conninfo cinfo; int len, err = 0; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case RFCOMM_LM: switch (rfcomm_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = RFCOMM_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; break; case BT_SECURITY_HIGH: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE; break; case BT_SECURITY_FIPS: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE | RFCOMM_LM_FIPS; break; default: opt = 0; break; } if (rfcomm_pi(sk)->role_switch) opt |= RFCOMM_LM_MASTER; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case RFCOMM_CONNINFO: if (sk->sk_state != BT_CONNECTED && !rfcomm_pi(sk)->dlc->defer_setup) { err = -ENOTCONN; break; } l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; conn = l2cap_pi(l2cap_sk)->chan->conn; memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = conn->hcon->handle; memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; int len, err = 0; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } sec.level = rfcomm_pi(sk)->sec_level; sec.key_size = 0; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk __maybe_unused = sock->sk; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); err = bt_sock_ioctl(sock, cmd, arg); if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY lock_sock(sk); err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); release_sock(sk); #else err = -EOPNOTSUPP; #endif } return err; } #ifdef CONFIG_COMPAT static int rfcomm_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return rfcomm_sock_ioctl(sock, cmd, (unsigned 
long)compat_ptr(arg)); } #endif static int rfcomm_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; release_sock(sk); __rfcomm_sock_close(sk); lock_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int rfcomm_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; err = rfcomm_sock_shutdown(sock, 2); sock_orphan(sk); rfcomm_sock_kill(sk); return err; } /* ---- RFCOMM core layer callbacks ---- * * called under rfcomm_lock() */ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) { struct sock *sk, *parent; bdaddr_t src, dst; int result = 0; BT_DBG("session %p channel %d", s, channel); rfcomm_session_getaddr(s, &src, &dst); /* Check if we have socket listening on channel */ parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); if (!parent) return 0; lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { BT_DBG("backlog full %d", parent->sk_ack_backlog); goto done; } sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0); if (!sk) goto done; bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM); rfcomm_sock_init(sk, parent); bacpy(&rfcomm_pi(sk)->src, &src); bacpy(&rfcomm_pi(sk)->dst, &dst); rfcomm_pi(sk)->channel = channel; sk->sk_state = BT_CONFIG; bt_accept_enqueue(parent, sk, true); /* Accept connection and return socket DLC */ *d = rfcomm_pi(sk)->dlc; result = 1; done: release_sock(parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) parent->sk_state_change(parent); return result; } static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { seq_printf(f, "%pMR %pMR %d %d\n", &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst, sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock(&rfcomm_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(rfcomm_sock_debugfs); static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .gettstamp = sock_gettstamp, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = rfcomm_sock_compat_ioctl, #endif }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init rfcomm_init_sockets(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr)); err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) { BT_ERR("RFCOMM socket layer registration failed"); goto error; } err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create RFCOMM proc file"); 
bt_sock_unregister(BTPROTO_RFCOMM); goto error; } BT_INFO("RFCOMM socket layer initialized"); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); return 0; error: proto_unregister(&rfcomm_proto); return err; } void __exit rfcomm_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "rfcomm"); debugfs_remove(rfcomm_sock_debugfs); bt_sock_unregister(BTPROTO_RFCOMM); proto_unregister(&rfcomm_proto); }
linux-master
net/bluetooth/rfcomm/sock.c
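For context, a minimal userspace client for the socket layer above might look like the sketch below. The peer address and channel are placeholders, and the program needs the BlueZ development headers and -lbluetooth for str2ba(); connect() and write() end up in rfcomm_sock_connect() and rfcomm_sock_sendmsg() respectively.

/* Minimal sketch of a userspace RFCOMM client for the socket layer above.
 * The remote address and channel are placeholders; error handling is
 * reduced to the essentials. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(void)
{
	struct sockaddr_rc addr = { 0 };
	int sk = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	if (sk < 0) {
		perror("socket");
		return 1;
	}

	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;                              /* placeholder channel */
	str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);     /* placeholder address */

	/* Triggers rfcomm_sock_connect() in the kernel */
	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(sk);
		return 1;
	}

	write(sk, "hello\n", 6);   /* ends up in rfcomm_sock_sendmsg() */
	close(sk);
	return 0;
}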
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <[email protected]> Copyright (C) 2002 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM TTY. */ #include <linux/module.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/rfcomm.h> #define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole lotta rfcomm devices */ #define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ #define RFCOMM_TTY_MINOR 0 static DEFINE_MUTEX(rfcomm_ioctl_mutex); static struct tty_driver *rfcomm_tty_driver; struct rfcomm_dev { struct tty_port port; struct list_head list; char name[12]; int id; unsigned long flags; int err; unsigned long status; /* don't export to userspace */ bdaddr_t src; bdaddr_t dst; u8 channel; uint modem_status; struct rfcomm_dlc *dlc; struct device *tty_dev; atomic_t wmem_alloc; struct sk_buff_head pending; }; static LIST_HEAD(rfcomm_dev_list); static DEFINE_MUTEX(rfcomm_dev_lock); static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); /* ---- Device functions ---- */ static void rfcomm_dev_destruct(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); struct rfcomm_dlc *dlc = dev->dlc; BT_DBG("dev %p dlc %p", dev, dlc); rfcomm_dlc_lock(dlc); /* Detach DLC if it's owned by this dev */ if (dlc->owner == dev) dlc->owner = NULL; rfcomm_dlc_unlock(dlc); rfcomm_dlc_put(dlc); if (dev->tty_dev) tty_unregister_device(rfcomm_tty_driver, dev->id); mutex_lock(&rfcomm_dev_lock); list_del(&dev->list); mutex_unlock(&rfcomm_dev_lock); kfree(dev); /* It's safe to call module_put() here because socket still holds reference to this module. 
*/ module_put(THIS_MODULE); } /* device-specific initialization: open the dlc */ static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); int err; err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); if (err) set_bit(TTY_IO_ERROR, &tty->flags); return err; } /* we block the open until the dlc->state becomes BT_CONNECTED */ static bool rfcomm_dev_carrier_raised(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); return (dev->dlc->state == BT_CONNECTED); } /* device-specific cleanup: close the dlc */ static void rfcomm_dev_shutdown(struct tty_port *port) { struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); if (dev->tty_dev->parent) device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); /* close the dlc */ rfcomm_dlc_close(dev->dlc, 0); } static const struct tty_port_operations rfcomm_port_ops = { .destruct = rfcomm_dev_destruct, .activate = rfcomm_dev_activate, .shutdown = rfcomm_dev_shutdown, .carrier_raised = rfcomm_dev_carrier_raised, }; static struct rfcomm_dev *__rfcomm_dev_lookup(int id) { struct rfcomm_dev *dev; list_for_each_entry(dev, &rfcomm_dev_list, list) if (dev->id == id) return dev; return NULL; } static struct rfcomm_dev *rfcomm_dev_get(int id) { struct rfcomm_dev *dev; mutex_lock(&rfcomm_dev_lock); dev = __rfcomm_dev_lookup(id); if (dev && !tty_port_get(&dev->port)) dev = NULL; mutex_unlock(&rfcomm_dev_lock); return dev; } static void rfcomm_reparent_device(struct rfcomm_dev *dev) { struct hci_dev *hdev; struct hci_conn *conn; hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR); if (!hdev) return; /* The lookup results are unsafe to access without the * hci device lock (FIXME: why is this not documented?) */ hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst); /* Just because the acl link is in the hash table is no * guarantee the sysfs device has been added ... 
*/ if (conn && device_is_registered(&conn->dev)) device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT); hci_dev_unlock(hdev); hci_dev_put(hdev); } static ssize_t address_show(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); return sprintf(buf, "%pMR\n", &dev->dst); } static ssize_t channel_show(struct device *tty_dev, struct device_attribute *attr, char *buf) { struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); return sprintf(buf, "%d\n", dev->channel); } static DEVICE_ATTR_RO(address); static DEVICE_ATTR_RO(channel); static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { struct rfcomm_dev *dev, *entry; struct list_head *head = &rfcomm_dev_list; int err = 0; dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); mutex_lock(&rfcomm_dev_lock); if (req->dev_id < 0) { dev->id = 0; list_for_each_entry(entry, &rfcomm_dev_list, list) { if (entry->id != dev->id) break; dev->id++; head = &entry->list; } } else { dev->id = req->dev_id; list_for_each_entry(entry, &rfcomm_dev_list, list) { if (entry->id == dev->id) { err = -EADDRINUSE; goto out; } if (entry->id > dev->id - 1) break; head = &entry->list; } } if ((dev->id < 0) || (dev->id > RFCOMM_MAX_DEV - 1)) { err = -ENFILE; goto out; } sprintf(dev->name, "rfcomm%d", dev->id); list_add(&dev->list, head); bacpy(&dev->src, &req->src); bacpy(&dev->dst, &req->dst); dev->channel = req->channel; dev->flags = req->flags & ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); tty_port_init(&dev->port); dev->port.ops = &rfcomm_port_ops; skb_queue_head_init(&dev->pending); rfcomm_dlc_lock(dlc); if (req->flags & (1 << RFCOMM_REUSE_DLC)) { struct sock *sk = dlc->owner; struct sk_buff *skb; BUG_ON(!sk); rfcomm_dlc_throttle(dlc); while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); skb_queue_tail(&dev->pending, skb); atomic_sub(skb->len, &sk->sk_rmem_alloc); } } dlc->data_ready = rfcomm_dev_data_ready; dlc->state_change = rfcomm_dev_state_change; dlc->modem_status = rfcomm_dev_modem_status; dlc->owner = dev; dev->dlc = dlc; rfcomm_dev_modem_status(dlc, dlc->remote_v24_sig); rfcomm_dlc_unlock(dlc); /* It's safe to call __module_get() here because socket already holds reference to this module. 
*/ __module_get(THIS_MODULE); mutex_unlock(&rfcomm_dev_lock); return dev; out: mutex_unlock(&rfcomm_dev_lock); kfree(dev); return ERR_PTR(err); } static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { struct rfcomm_dev *dev; struct device *tty; BT_DBG("id %d channel %d", req->dev_id, req->channel); dev = __rfcomm_dev_add(req, dlc); if (IS_ERR(dev)) { rfcomm_dlc_put(dlc); return PTR_ERR(dev); } tty = tty_port_register_device(&dev->port, rfcomm_tty_driver, dev->id, NULL); if (IS_ERR(tty)) { tty_port_put(&dev->port); return PTR_ERR(tty); } dev->tty_dev = tty; rfcomm_reparent_device(dev); dev_set_drvdata(dev->tty_dev, dev); if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) BT_ERR("Failed to create address attribute"); if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) BT_ERR("Failed to create channel attribute"); return dev->id; } /* ---- Send buffer ---- */ static inline unsigned int rfcomm_room(struct rfcomm_dev *dev) { struct rfcomm_dlc *dlc = dev->dlc; /* Limit the outstanding number of packets not yet sent to 40 */ int pending = 40 - atomic_read(&dev->wmem_alloc); return max(0, pending) * dlc->mtu; } static void rfcomm_wfree(struct sk_buff *skb) { struct rfcomm_dev *dev = (void *) skb->sk; atomic_dec(&dev->wmem_alloc); if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) tty_port_tty_wakeup(&dev->port); tty_port_put(&dev->port); } static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) { tty_port_get(&dev->port); atomic_inc(&dev->wmem_alloc); skb->sk = (void *) dev; skb->destructor = rfcomm_wfree; } static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) { struct sk_buff *skb = alloc_skb(size, priority); if (skb) rfcomm_set_owner_w(skb, dev); return skb; } /* ---- Device IOCTLs ---- */ #define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP)) static int __rfcomm_create_dev(struct sock *sk, void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dlc *dlc; int id; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags); if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) return -EPERM; if (req.flags & (1 << RFCOMM_REUSE_DLC)) { /* Socket must be connected */ if (sk->sk_state != BT_CONNECTED) return -EBADFD; dlc = rfcomm_pi(sk)->dlc; rfcomm_dlc_hold(dlc); } else { /* Validate the channel is unused */ dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); if (IS_ERR(dlc)) return PTR_ERR(dlc); if (dlc) return -EBUSY; dlc = rfcomm_dlc_alloc(GFP_KERNEL); if (!dlc) return -ENOMEM; } id = rfcomm_dev_add(&req, dlc); if (id < 0) return id; if (req.flags & (1 << RFCOMM_REUSE_DLC)) { /* DLC is now used by device. 
* Socket must be disconnected */ sk->sk_state = BT_CLOSED; } return id; } static int __rfcomm_release_dev(void __user *arg) { struct rfcomm_dev_req req; struct rfcomm_dev *dev; struct tty_struct *tty; if (copy_from_user(&req, arg, sizeof(req))) return -EFAULT; BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); dev = rfcomm_dev_get(req.dev_id); if (!dev) return -ENODEV; if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { tty_port_put(&dev->port); return -EPERM; } /* only release once */ if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) { tty_port_put(&dev->port); return -EALREADY; } if (req.flags & (1 << RFCOMM_HANGUP_NOW)) rfcomm_dlc_close(dev->dlc, 0); /* Shut down TTY synchronously before freeing rfcomm_dev */ tty = tty_port_tty_get(&dev->port); if (tty) { tty_vhangup(tty); tty_kref_put(tty); } if (!test_bit(RFCOMM_TTY_OWNED, &dev->status)) tty_port_put(&dev->port); tty_port_put(&dev->port); return 0; } static int rfcomm_create_dev(struct sock *sk, void __user *arg) { int ret; mutex_lock(&rfcomm_ioctl_mutex); ret = __rfcomm_create_dev(sk, arg); mutex_unlock(&rfcomm_ioctl_mutex); return ret; } static int rfcomm_release_dev(void __user *arg) { int ret; mutex_lock(&rfcomm_ioctl_mutex); ret = __rfcomm_release_dev(arg); mutex_unlock(&rfcomm_ioctl_mutex); return ret; } static int rfcomm_get_dev_list(void __user *arg) { struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; int n = 0, size, err; u16 dev_num; BT_DBG(""); if (get_user(dev_num, (u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) return -EINVAL; size = sizeof(*dl) + dev_num * sizeof(*di); dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; di = dl->dev_info; mutex_lock(&rfcomm_dev_lock); list_for_each_entry(dev, &rfcomm_dev_list, list) { if (!tty_port_get(&dev->port)) continue; (di + n)->id = dev->id; (di + n)->flags = dev->flags; (di + n)->state = dev->dlc->state; (di + n)->channel = dev->channel; bacpy(&(di + n)->src, &dev->src); bacpy(&(di + n)->dst, &dev->dst); tty_port_put(&dev->port); if (++n >= dev_num) break; } mutex_unlock(&rfcomm_dev_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*di); err = copy_to_user(arg, dl, size); kfree(dl); return err ? 
-EFAULT : 0; } static int rfcomm_get_dev_info(void __user *arg) { struct rfcomm_dev *dev; struct rfcomm_dev_info di; int err = 0; BT_DBG(""); if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; dev = rfcomm_dev_get(di.id); if (!dev) return -ENODEV; di.flags = dev->flags; di.channel = dev->channel; di.state = dev->dlc->state; bacpy(&di.src, &dev->src); bacpy(&di.dst, &dev->dst); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; tty_port_put(&dev->port); return err; } int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { BT_DBG("cmd %d arg %p", cmd, arg); switch (cmd) { case RFCOMMCREATEDEV: return rfcomm_create_dev(sk, arg); case RFCOMMRELEASEDEV: return rfcomm_release_dev(arg); case RFCOMMGETDEVLIST: return rfcomm_get_dev_list(arg); case RFCOMMGETDEVINFO: return rfcomm_get_dev_info(arg); } return -EINVAL; } /* ---- DLC callbacks ---- */ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) { struct rfcomm_dev *dev = dlc->owner; if (!dev) { kfree_skb(skb); return; } if (!skb_queue_empty(&dev->pending)) { skb_queue_tail(&dev->pending, skb); return; } BT_DBG("dlc %p len %d", dlc, skb->len); tty_insert_flip_string(&dev->port, skb->data, skb->len); tty_flip_buffer_push(&dev->port); kfree_skb(skb); } static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) { struct rfcomm_dev *dev = dlc->owner; if (!dev) return; BT_DBG("dlc %p dev %p err %d", dlc, dev, err); dev->err = err; if (dlc->state == BT_CONNECTED) { rfcomm_reparent_device(dev); wake_up_interruptible(&dev->port.open_wait); } else if (dlc->state == BT_CLOSED) tty_port_tty_hangup(&dev->port, false); } static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) { struct rfcomm_dev *dev = dlc->owner; if (!dev) return; BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) tty_port_tty_hangup(&dev->port, true); dev->modem_status = ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0); } /* ---- TTY functions ---- */ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) { struct sk_buff *skb; int inserted = 0; BT_DBG("dev %p", dev); rfcomm_dlc_lock(dev->dlc); while ((skb = skb_dequeue(&dev->pending))) { inserted += tty_insert_flip_string(&dev->port, skb->data, skb->len); kfree_skb(skb); } rfcomm_dlc_unlock(dev->dlc); if (inserted > 0) tty_flip_buffer_push(&dev->port); } /* do the reverse of install, clearing the tty fields and releasing the * reference to tty_port */ static void rfcomm_tty_cleanup(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; rfcomm_dlc_unlock(dev->dlc); /* * purge the dlc->tx_queue to avoid circular dependencies * between dev and dlc */ skb_queue_purge(&dev->dlc->tx_queue); tty_port_put(&dev->port); } /* we acquire the tty_port reference since it's here the tty is first used * by setting the termios. 
We also populate the driver_data field and install * the tty port */ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) { struct rfcomm_dev *dev; struct rfcomm_dlc *dlc; int err; dev = rfcomm_dev_get(tty->index); if (!dev) return -ENODEV; dlc = dev->dlc; /* Attach TTY and open DLC */ rfcomm_dlc_lock(dlc); tty->driver_data = dev; rfcomm_dlc_unlock(dlc); set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); /* install the tty_port */ err = tty_port_install(&dev->port, driver, tty); if (err) { rfcomm_tty_cleanup(tty); return err; } /* take over the tty_port reference if the port was created with the * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port * when the last process closes the tty. The behaviour is expected by * userspace. */ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { set_bit(RFCOMM_TTY_OWNED, &dev->status); tty_port_put(&dev->port); } return 0; } static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = tty->driver_data; int err; BT_DBG("tty %p id %d", tty, tty->index); BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, dev->channel, dev->port.count); err = tty_port_open(&dev->port, tty, filp); if (err) return err; /* * FIXME: rfcomm should use proper flow control for * received data. This hack will be unnecessary and can * be removed when that's implemented */ rfcomm_tty_copy_pending(dev); rfcomm_dlc_unthrottle(dev->dlc); return 0; } static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, dev->port.count); tty_port_close(&dev->port, tty, filp); } static ssize_t rfcomm_tty_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct rfcomm_dev *dev = tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; struct sk_buff *skb; size_t sent = 0, size; BT_DBG("tty %p count %zu", tty, count); while (count) { size = min_t(size_t, count, dlc->mtu); skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); if (!skb) break; skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); skb_put_data(skb, buf + sent, size); rfcomm_dlc_send_noerror(dlc, skb); sent += size; count -= size; } return sent; } static unsigned int rfcomm_tty_write_room(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; int room = 0; if (dev && dev->dlc) room = rfcomm_room(dev); BT_DBG("tty %p room %d", tty, room); return room; } static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { BT_DBG("tty %p cmd 0x%02x", tty, cmd); switch (cmd) { case TCGETS: BT_DBG("TCGETS is not supported"); return -ENOIOCTLCMD; case TCSETS: BT_DBG("TCSETS is not supported"); return -ENOIOCTLCMD; case TIOCMIWAIT: BT_DBG("TIOCMIWAIT"); break; case TIOCSERGETLSR: BT_ERR("TIOCSERGETLSR is not supported"); return -ENOIOCTLCMD; case TIOCSERCONFIG: BT_ERR("TIOCSERCONFIG is not supported"); return -ENOIOCTLCMD; default: return -ENOIOCTLCMD; /* ioctls which we must ignore */ } return -ENOIOCTLCMD; } static void rfcomm_tty_set_termios(struct tty_struct *tty, const struct ktermios *old) { struct ktermios *new = &tty->termios; int old_baud_rate = tty_termios_baud_rate(old); int new_baud_rate = tty_termios_baud_rate(new); u8 baud, data_bits, stop_bits, parity, x_on, x_off; u16 changes = 0; struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p termios %p", tty, old); if (!dev || !dev->dlc || !dev->dlc->session) return; /* Handle turning off CRTSCTS */ if ((old->c_cflag & CRTSCTS) && 
!(new->c_cflag & CRTSCTS)) BT_DBG("Turning off CRTSCTS unsupported"); /* Parity on/off and when on, odd/even */ if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || ((old->c_cflag & PARODD) != (new->c_cflag & PARODD))) { changes |= RFCOMM_RPN_PM_PARITY; BT_DBG("Parity change detected."); } /* Mark and space parity are not supported! */ if (new->c_cflag & PARENB) { if (new->c_cflag & PARODD) { BT_DBG("Parity is ODD"); parity = RFCOMM_RPN_PARITY_ODD; } else { BT_DBG("Parity is EVEN"); parity = RFCOMM_RPN_PARITY_EVEN; } } else { BT_DBG("Parity is OFF"); parity = RFCOMM_RPN_PARITY_NONE; } /* Setting the x_on / x_off characters */ if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) { BT_DBG("XOFF custom"); x_on = new->c_cc[VSTOP]; changes |= RFCOMM_RPN_PM_XON; } else { BT_DBG("XOFF default"); x_on = RFCOMM_RPN_XON_CHAR; } if (old->c_cc[VSTART] != new->c_cc[VSTART]) { BT_DBG("XON custom"); x_off = new->c_cc[VSTART]; changes |= RFCOMM_RPN_PM_XOFF; } else { BT_DBG("XON default"); x_off = RFCOMM_RPN_XOFF_CHAR; } /* Handle setting of stop bits */ if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB)) changes |= RFCOMM_RPN_PM_STOP; /* POSIX does not support 1.5 stop bits and RFCOMM does not * support 2 stop bits. So a request for 2 stop bits gets * translated to 1.5 stop bits */ if (new->c_cflag & CSTOPB) stop_bits = RFCOMM_RPN_STOP_15; else stop_bits = RFCOMM_RPN_STOP_1; /* Handle number of data bits [5-8] */ if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) changes |= RFCOMM_RPN_PM_DATA; switch (new->c_cflag & CSIZE) { case CS5: data_bits = RFCOMM_RPN_DATA_5; break; case CS6: data_bits = RFCOMM_RPN_DATA_6; break; case CS7: data_bits = RFCOMM_RPN_DATA_7; break; case CS8: data_bits = RFCOMM_RPN_DATA_8; break; default: data_bits = RFCOMM_RPN_DATA_8; break; } /* Handle baudrate settings */ if (old_baud_rate != new_baud_rate) changes |= RFCOMM_RPN_PM_BITRATE; switch (new_baud_rate) { case 2400: baud = RFCOMM_RPN_BR_2400; break; case 4800: baud = RFCOMM_RPN_BR_4800; break; case 7200: baud = RFCOMM_RPN_BR_7200; break; case 9600: baud = RFCOMM_RPN_BR_9600; break; case 19200: baud = RFCOMM_RPN_BR_19200; break; case 38400: baud = RFCOMM_RPN_BR_38400; break; case 57600: baud = RFCOMM_RPN_BR_57600; break; case 115200: baud = RFCOMM_RPN_BR_115200; break; case 230400: baud = RFCOMM_RPN_BR_230400; break; default: /* 9600 is standard accordinag to the RFCOMM specification */ baud = RFCOMM_RPN_BR_9600; break; } if (changes) rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, data_bits, stop_bits, parity, RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); } static void rfcomm_tty_throttle(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); rfcomm_dlc_throttle(dev->dlc); } static void rfcomm_tty_unthrottle(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); rfcomm_dlc_unthrottle(dev->dlc); } static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); if (!dev || !dev->dlc) return 0; if (!skb_queue_empty(&dev->dlc->tx_queue)) return dev->dlc->mtu; return 0; } static void rfcomm_tty_flush_buffer(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); if (!dev || !dev->dlc) return; skb_queue_purge(&dev->dlc->tx_queue); tty_wakeup(tty); } static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch) { BT_DBG("tty %p ch %c", tty, ch); } static void 
rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) { BT_DBG("tty %p timeout %d", tty, timeout); } static void rfcomm_tty_hangup(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); tty_port_hangup(&dev->port); } static int rfcomm_tty_tiocmget(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; BT_DBG("tty %p dev %p", tty, dev); return dev->modem_status; } static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct rfcomm_dev *dev = tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; u8 v24_sig; BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); rfcomm_dlc_get_modem_status(dlc, &v24_sig); if (set & TIOCM_DSR || set & TIOCM_DTR) v24_sig |= RFCOMM_V24_RTC; if (set & TIOCM_RTS || set & TIOCM_CTS) v24_sig |= RFCOMM_V24_RTR; if (set & TIOCM_RI) v24_sig |= RFCOMM_V24_IC; if (set & TIOCM_CD) v24_sig |= RFCOMM_V24_DV; if (clear & TIOCM_DSR || clear & TIOCM_DTR) v24_sig &= ~RFCOMM_V24_RTC; if (clear & TIOCM_RTS || clear & TIOCM_CTS) v24_sig &= ~RFCOMM_V24_RTR; if (clear & TIOCM_RI) v24_sig &= ~RFCOMM_V24_IC; if (clear & TIOCM_CD) v24_sig &= ~RFCOMM_V24_DV; rfcomm_dlc_set_modem_status(dlc, v24_sig); return 0; } /* ---- TTY structure ---- */ static const struct tty_operations rfcomm_ops = { .open = rfcomm_tty_open, .close = rfcomm_tty_close, .write = rfcomm_tty_write, .write_room = rfcomm_tty_write_room, .chars_in_buffer = rfcomm_tty_chars_in_buffer, .flush_buffer = rfcomm_tty_flush_buffer, .ioctl = rfcomm_tty_ioctl, .throttle = rfcomm_tty_throttle, .unthrottle = rfcomm_tty_unthrottle, .set_termios = rfcomm_tty_set_termios, .send_xchar = rfcomm_tty_send_xchar, .hangup = rfcomm_tty_hangup, .wait_until_sent = rfcomm_tty_wait_until_sent, .tiocmget = rfcomm_tty_tiocmget, .tiocmset = rfcomm_tty_tiocmset, .install = rfcomm_tty_install, .cleanup = rfcomm_tty_cleanup, }; int __init rfcomm_init_ttys(void) { int error; rfcomm_tty_driver = tty_alloc_driver(RFCOMM_TTY_PORTS, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(rfcomm_tty_driver)) return PTR_ERR(rfcomm_tty_driver); rfcomm_tty_driver->driver_name = "rfcomm"; rfcomm_tty_driver->name = "rfcomm"; rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR; rfcomm_tty_driver->minor_start = RFCOMM_TTY_MINOR; rfcomm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; rfcomm_tty_driver->init_termios = tty_std_termios; rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); error = tty_register_driver(rfcomm_tty_driver); if (error) { BT_ERR("Can't register RFCOMM TTY driver"); tty_driver_kref_put(rfcomm_tty_driver); return error; } BT_INFO("RFCOMM TTY layer initialized"); return 0; } void rfcomm_cleanup_ttys(void) { tty_unregister_driver(rfcomm_tty_driver); tty_driver_kref_put(rfcomm_tty_driver); }
linux-master
net/bluetooth/rfcomm/tty.c
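The TTY devices above are normally created through the RFCOMMCREATEDEV ioctl handled by rfcomm_dev_ioctl() in sock.c. The sketch below shows the idea under some assumptions: the BlueZ userspace headers provide struct rfcomm_dev_req and the ioctl number, the peer address and channel are placeholders, dev_id -1 requests the first free id, and CAP_NET_ADMIN is usually required unless RFCOMM_REUSE_DLC is used on an already-connected socket.

/* Sketch of binding a /dev/rfcommN TTY via the RFCOMMCREATEDEV ioctl handled
 * by rfcomm_dev_ioctl() above. The destination address and channel are
 * placeholders; dev_id -1 asks the kernel for the first free id. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(void)
{
	struct rfcomm_dev_req req;
	/* A raw RFCOMM socket serves as the control socket for the ioctl */
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_RFCOMM);
	int id;

	if (ctl < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.dev_id = -1;                              /* first free rfcommN */
	req.flags = (1 << RFCOMM_RELEASE_ONHUP);      /* typically needs CAP_NET_ADMIN */
	bacpy(&req.src, BDADDR_ANY);
	str2ba("00:11:22:33:44:55", &req.dst);        /* placeholder peer */
	req.channel = 1;                              /* placeholder channel */

	id = ioctl(ctl, RFCOMMCREATEDEV, &req);
	if (id < 0)
		perror("RFCOMMCREATEDEV");
	else
		printf("bound /dev/rfcomm%d; the DLC opens on first open()\n", id);

	close(ctl);
	return 0;
}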
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by Clément Moreau <[email protected]> David Libault <[email protected]> Copyright (C) 2002 Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/module.h> #include <linux/kthread.h> #include <linux/file.h> #include <linux/etherdevice.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/hci_core.h> #include "bnep.h" #define VERSION "1.3" static bool compress_src = true; static bool compress_dst = true; static LIST_HEAD(bnep_session_list); static DECLARE_RWSEM(bnep_session_sem); static struct bnep_session *__bnep_get_session(u8 *dst) { struct bnep_session *s; BT_DBG(""); list_for_each_entry(s, &bnep_session_list, list) if (ether_addr_equal(dst, s->eh.h_source)) return s; return NULL; } static void __bnep_link_session(struct bnep_session *s) { list_add(&s->list, &bnep_session_list); } static void __bnep_unlink_session(struct bnep_session *s) { list_del(&s->list); } static int bnep_send(struct bnep_session *s, void *data, size_t len) { struct socket *sock = s->sock; struct kvec iv = { data, len }; return kernel_sendmsg(sock, &s->msg, &iv, 1, len); } static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp) { struct bnep_control_rsp rsp; rsp.type = BNEP_CONTROL; rsp.ctrl = ctrl; rsp.resp = htons(resp); return bnep_send(s, &rsp, sizeof(rsp)); } #ifdef CONFIG_BT_BNEP_PROTO_FILTER static inline void bnep_set_default_proto_filter(struct bnep_session *s) { /* (IPv4, ARP) */ s->proto_filter[0].start = ETH_P_IP; s->proto_filter[0].end = ETH_P_ARP; /* (RARP, AppleTalk) */ s->proto_filter[1].start = ETH_P_RARP; s->proto_filter[1].end = ETH_P_AARP; /* (IPX, IPv6) */ s->proto_filter[2].start = ETH_P_IPX; s->proto_filter[2].end = ETH_P_IPV6; } #endif static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len) { int n; if (len < 2) return -EILSEQ; n = get_unaligned_be16(data); data++; len -= 2; if (len < n) return -EILSEQ; BT_DBG("filter len %d", n); #ifdef CONFIG_BT_BNEP_PROTO_FILTER n /= 4; if (n <= BNEP_MAX_PROTO_FILTERS) { struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < n; i++) { f[i].start = get_unaligned_be16(data++); f[i].end = get_unaligned_be16(data++); BT_DBG("proto filter start %u end %u", f[i].start, f[i].end); } if (i < BNEP_MAX_PROTO_FILTERS) memset(f + i, 0, sizeof(*f)); if (n == 0) bnep_set_default_proto_filter(s); bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS); } else { bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED); } #else 
bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_UNSUPPORTED_REQ); #endif return 0; } static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len) { int n; if (len < 2) return -EILSEQ; n = get_unaligned_be16(data); data += 2; len -= 2; if (len < n) return -EILSEQ; BT_DBG("filter len %d", n); #ifdef CONFIG_BT_BNEP_MC_FILTER n /= (ETH_ALEN * 2); if (n > 0) { int i; s->mc_filter = 0; /* Always send broadcast */ set_bit(bnep_mc_hash(s->dev->broadcast), (ulong *) &s->mc_filter); /* Add address ranges to the multicast hash */ for (; n > 0; n--) { u8 a1[6], *a2; memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; a2 = data; data += ETH_ALEN; BT_DBG("mc filter %pMR -> %pMR", a1, a2); /* Iterate from a1 to a2 */ set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { /* Increment a1 */ i = 5; while (i >= 0 && ++a1[i--] == 0) ; set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); } } } BT_DBG("mc filter hash 0x%llx", s->mc_filter); bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_SUCCESS); #else bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_FILTER_UNSUPPORTED_REQ); #endif return 0; } static int bnep_rx_control(struct bnep_session *s, void *data, int len) { u8 cmd = *(u8 *)data; int err = 0; data++; len--; switch (cmd) { case BNEP_CMD_NOT_UNDERSTOOD: case BNEP_SETUP_CONN_RSP: case BNEP_FILTER_NET_TYPE_RSP: case BNEP_FILTER_MULTI_ADDR_RSP: /* Ignore these for now */ break; case BNEP_FILTER_NET_TYPE_SET: err = bnep_ctrl_set_netfilter(s, data, len); break; case BNEP_FILTER_MULTI_ADDR_SET: err = bnep_ctrl_set_mcfilter(s, data, len); break; case BNEP_SETUP_CONN_REQ: /* Successful response should be sent only once */ if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) && !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags)) err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_SUCCESS); else err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED); break; default: { u8 pkt[3]; pkt[0] = BNEP_CONTROL; pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; pkt[2] = cmd; err = bnep_send(s, pkt, sizeof(pkt)); } break; } return err; } static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb) { struct bnep_ext_hdr *h; int err = 0; do { h = (void *) skb->data; if (!skb_pull(skb, sizeof(*h))) { err = -EILSEQ; break; } BT_DBG("type 0x%x len %u", h->type, h->len); switch (h->type & BNEP_TYPE_MASK) { case BNEP_EXT_CONTROL: bnep_rx_control(s, skb->data, skb->len); break; default: /* Unknown extension, skip it. 
*/ break; } if (!skb_pull(skb, h->len)) { err = -EILSEQ; break; } } while (!err && (h->type & BNEP_EXT_HEADER)); return err; } static u8 __bnep_rx_hlen[] = { ETH_HLEN, /* BNEP_GENERAL */ 0, /* BNEP_CONTROL */ 2, /* BNEP_COMPRESSED */ ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ }; static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) { struct net_device *dev = s->dev; struct sk_buff *nskb; u8 type, ctrl_type; dev->stats.rx_bytes += skb->len; type = *(u8 *) skb->data; skb_pull(skb, 1); ctrl_type = *(u8 *)skb->data; if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen)) goto badframe; if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { if (bnep_rx_control(s, skb->data, skb->len) < 0) { dev->stats.tx_errors++; kfree_skb(skb); return 0; } if (!(type & BNEP_EXT_HEADER)) { kfree_skb(skb); return 0; } /* Verify and pull ctrl message since it's already processed */ switch (ctrl_type) { case BNEP_SETUP_CONN_REQ: /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */ if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2)) goto badframe; break; case BNEP_FILTER_MULTI_ADDR_SET: case BNEP_FILTER_NET_TYPE_SET: /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */ if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2)) goto badframe; break; default: kfree_skb(skb); return 0; } } else { skb_reset_mac_header(skb); /* Verify and pull out header */ if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) goto badframe; s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); } if (type & BNEP_EXT_HEADER) { if (bnep_rx_extension(s, skb) < 0) goto badframe; } /* Strip 802.1p header */ if (ntohs(s->eh.h_proto) == ETH_P_8021Q) { if (!skb_pull(skb, 4)) goto badframe; s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); } /* We have to alloc new skb and copy data here :(. Because original skb * may not be modified and because of the alignment requirements. 
*/ nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); if (!nskb) { dev->stats.rx_dropped++; kfree_skb(skb); return -ENOMEM; } skb_reserve(nskb, 2); /* Decompress header and construct ether frame */ switch (type & BNEP_TYPE_MASK) { case BNEP_COMPRESSED: __skb_put_data(nskb, &s->eh, ETH_HLEN); break; case BNEP_COMPRESSED_SRC_ONLY: __skb_put_data(nskb, s->eh.h_dest, ETH_ALEN); __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); break; case BNEP_COMPRESSED_DST_ONLY: __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2); break; case BNEP_GENERAL: __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN * 2); put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); break; } skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); kfree_skb(skb); dev->stats.rx_packets++; nskb->ip_summed = CHECKSUM_NONE; nskb->protocol = eth_type_trans(nskb, dev); netif_rx(nskb); return 0; badframe: dev->stats.rx_errors++; kfree_skb(skb); return 0; } static u8 __bnep_tx_types[] = { BNEP_GENERAL, BNEP_COMPRESSED_SRC_ONLY, BNEP_COMPRESSED_DST_ONLY, BNEP_COMPRESSED }; static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; struct socket *sock = s->sock; struct kvec iv[3]; int len = 0, il = 0; u8 type = 0; BT_DBG("skb %p dev %p type %u", skb, skb->dev, skb->pkt_type); if (!skb->dev) { /* Control frame sent by us */ goto send; } iv[il++] = (struct kvec) { &type, 1 }; len++; if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source)) type |= 0x01; if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest)) type |= 0x02; if (type) skb_pull(skb, ETH_ALEN * 2); type = __bnep_tx_types[type]; switch (type) { case BNEP_COMPRESSED_SRC_ONLY: iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN }; len += ETH_ALEN; break; case BNEP_COMPRESSED_DST_ONLY: iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN }; len += ETH_ALEN; break; } send: iv[il++] = (struct kvec) { skb->data, skb->len }; len += skb->len; /* FIXME: linearize skb */ { len = kernel_sendmsg(sock, &s->msg, iv, il, len); } kfree_skb(skb); if (len > 0) { s->dev->stats.tx_bytes += len; s->dev->stats.tx_packets++; return 0; } return len; } static int bnep_session(void *arg) { struct bnep_session *s = arg; struct net_device *dev = s->dev; struct sock *sk = s->sock->sk; struct sk_buff *skb; DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG(""); set_user_nice(current, -15); add_wait_queue(sk_sleep(sk), &wait); while (1) { if (atomic_read(&s->terminate)) break; /* RX */ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) bnep_rx_frame(s, skb); else kfree_skb(skb); } if (sk->sk_state != BT_CONNECTED) break; /* TX */ while ((skb = skb_dequeue(&sk->sk_write_queue))) if (bnep_tx_frame(s, skb)) break; netif_wake_queue(dev); /* * wait_woken() performs the necessary memory barriers * for us; see the header comment for this primitive. 
*/ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(sk_sleep(sk), &wait); /* Cleanup session */ down_write(&bnep_session_sem); /* Delete network device */ unregister_netdev(dev); /* Wakeup user-space polling for socket errors */ s->sock->sk->sk_err = EUNATCH; wake_up_interruptible(sk_sleep(s->sock->sk)); /* Release the socket */ fput(s->sock->file); __bnep_unlink_session(s); up_write(&bnep_session_sem); free_netdev(dev); module_put_and_kthread_exit(0); return 0; } static struct device *bnep_get_device(struct bnep_session *session) { struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn; if (!conn || !conn->hcon) return NULL; return &conn->hcon->dev; } static struct device_type bnep_type = { .name = "bluetooth", }; int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) { u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); struct net_device *dev; struct bnep_session *s, *ss; u8 dst[ETH_ALEN], src[ETH_ALEN]; int err; BT_DBG(""); if (!l2cap_is_socket(sock)) return -EBADFD; if (req->flags & ~valid_flags) return -EINVAL; baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); /* session struct allocated as private part of net_device */ dev = alloc_netdev(sizeof(struct bnep_session), (*req->device) ? req->device : "bnep%d", NET_NAME_UNKNOWN, bnep_net_setup); if (!dev) return -ENOMEM; down_write(&bnep_session_sem); ss = __bnep_get_session(dst); if (ss && ss->state == BT_CONNECTED) { err = -EEXIST; goto failed; } s = netdev_priv(dev); /* This is rx header therefore addresses are swapped. * ie. eh.h_dest is our local address. */ memcpy(s->eh.h_dest, &src, ETH_ALEN); memcpy(s->eh.h_source, &dst, ETH_ALEN); eth_hw_addr_set(dev, s->eh.h_dest); s->dev = dev; s->sock = sock; s->role = req->role; s->state = BT_CONNECTED; s->flags = req->flags; s->msg.msg_flags = MSG_NOSIGNAL; #ifdef CONFIG_BT_BNEP_MC_FILTER /* Set default mc filter to not filter out any mc addresses * as defined in the BNEP specification (revision 0.95a) * http://grouper.ieee.org/groups/802/15/Bluetooth/BNEP.pdf */ s->mc_filter = ~0LL; #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Set default protocol filter */ bnep_set_default_proto_filter(s); #endif SET_NETDEV_DEV(dev, bnep_get_device(s)); SET_NETDEV_DEVTYPE(dev, &bnep_type); err = register_netdev(dev); if (err) goto failed; __bnep_link_session(s); __module_get(THIS_MODULE); s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name); if (IS_ERR(s->task)) { /* Session thread start failed, gotta cleanup. 
*/ module_put(THIS_MODULE); unregister_netdev(dev); __bnep_unlink_session(s); err = PTR_ERR(s->task); goto failed; } up_write(&bnep_session_sem); strcpy(req->device, dev->name); return 0; failed: up_write(&bnep_session_sem); free_netdev(dev); return err; } int bnep_del_connection(struct bnep_conndel_req *req) { u32 valid_flags = 0; struct bnep_session *s; int err = 0; BT_DBG(""); if (req->flags & ~valid_flags) return -EINVAL; down_read(&bnep_session_sem); s = __bnep_get_session(req->dst); if (s) { atomic_inc(&s->terminate); wake_up_interruptible(sk_sleep(s->sock->sk)); } else err = -ENOENT; up_read(&bnep_session_sem); return err; } static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) { u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); memset(ci, 0, sizeof(*ci)); memcpy(ci->dst, s->eh.h_source, ETH_ALEN); strcpy(ci->device, s->dev->name); ci->flags = s->flags & valid_flags; ci->state = s->state; ci->role = s->role; } int bnep_get_connlist(struct bnep_connlist_req *req) { struct bnep_session *s; int err = 0, n = 0; down_read(&bnep_session_sem); list_for_each_entry(s, &bnep_session_list, list) { struct bnep_conninfo ci; __bnep_copy_ci(&ci, s); if (copy_to_user(req->ci, &ci, sizeof(ci))) { err = -EFAULT; break; } if (++n >= req->cnum) break; req->ci++; } req->cnum = n; up_read(&bnep_session_sem); return err; } int bnep_get_conninfo(struct bnep_conninfo *ci) { struct bnep_session *s; int err = 0; down_read(&bnep_session_sem); s = __bnep_get_session(ci->dst); if (s) __bnep_copy_ci(ci, s); else err = -ENOENT; up_read(&bnep_session_sem); return err; } static int __init bnep_init(void) { char flt[50] = ""; #ifdef CONFIG_BT_BNEP_PROTO_FILTER strcat(flt, "protocol "); #endif #ifdef CONFIG_BT_BNEP_MC_FILTER strcat(flt, "multicast"); #endif BT_INFO("BNEP (Ethernet Emulation) ver %s", VERSION); if (flt[0]) BT_INFO("BNEP filters: %s", flt); bnep_sock_init(); return 0; } static void __exit bnep_exit(void) { bnep_sock_cleanup(); } module_init(bnep_init); module_exit(bnep_exit); module_param(compress_src, bool, 0644); MODULE_PARM_DESC(compress_src, "Compress sources headers"); module_param(compress_dst, bool, 0644); MODULE_PARM_DESC(compress_dst, "Compress destination headers"); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-4");
linux-master
net/bluetooth/bnep/core.c
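bnep_tx_frame() above picks one of four BNEP packet types by testing whether the outgoing frame's destination is the remote peer and whether its source is the local adapter address; the two test results form an index into __bnep_tx_types[]. The standalone sketch below is not kernel code: pick_type() and the local macros are illustrative, with the numeric values taken from the BNEP 1.0 specification (as in bnep.h).

/* Standalone sketch: replays the type lookup done by bnep_tx_frame(). */
#include <stdio.h>

#define BNEP_GENERAL              0x00
#define BNEP_COMPRESSED           0x02
#define BNEP_COMPRESSED_SRC_ONLY  0x03
#define BNEP_COMPRESSED_DST_ONLY  0x04

/* Index bit 0: the frame's destination is the remote peer, so it can be
 * omitted.  Index bit 1: the frame's source is our own address, so it can
 * be omitted.  Same ordering as __bnep_tx_types[] above. */
static const unsigned char tx_types[4] = {
	BNEP_GENERAL,              /* keep both addresses  */
	BNEP_COMPRESSED_SRC_ONLY,  /* send only the source */
	BNEP_COMPRESSED_DST_ONLY,  /* send only the dest   */
	BNEP_COMPRESSED,           /* send neither address */
};

static unsigned char pick_type(int dst_is_peer, int src_is_local)
{
	unsigned int idx = (dst_is_peer ? 0x01 : 0) | (src_is_local ? 0x02 : 0);
	return tx_types[idx];
}

int main(void)
{
	printf("both known:    0x%02x\n", pick_type(1, 1)); /* 0x02 */
	printf("only dst peer: 0x%02x\n", pick_type(1, 0)); /* 0x03 */
	printf("only src ours: 0x%02x\n", pick_type(0, 1)); /* 0x04 */
	printf("neither:       0x%02x\n", pick_type(0, 0)); /* 0x00 */
	return 0;
}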
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by David Libault <[email protected]> Copyright (C) 2002 Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/compat.h> #include <linux/export.h> #include <linux/file.h> #include "bnep.h" static struct bt_sock_list bnep_sk_list = { .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock) }; static int bnep_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; bt_sock_unlink(&bnep_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int do_bnep_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) { struct bnep_connlist_req cl; struct bnep_connadd_req ca; struct bnep_conndel_req cd; struct bnep_conninfo ci; struct socket *nsock; __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE); int err; BT_DBG("cmd %x arg %p", cmd, argp); switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } ca.device[sizeof(ca.device)-1] = 0; err = bnep_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return bnep_del_connection(&cd); case BNEPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case BNEPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = bnep_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; case BNEPGETSUPPFEAT: if (copy_to_user(argp, &supp_feat, sizeof(supp_feat))) return -EFAULT; return 0; default: return -EINVAL; } return 0; } static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return do_bnep_sock_ioctl(sock, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); if (cmd == BNEPGETCONNLIST) { struct bnep_connlist_req cl; unsigned __user *p = argp; u32 uci; int err; if (get_user(cl.cnum, p) || get_user(uci, p + 1)) return -EFAULT; cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return -EINVAL; err = 
bnep_get_connlist(&cl); if (!err && put_user(cl.cnum, p)) err = -EFAULT; return err; } return do_bnep_sock_ioctl(sock, cmd, argp); } #endif static const struct proto_ops bnep_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = bnep_sock_release, .ioctl = bnep_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = bnep_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto bnep_proto = { .name = "BNEP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = bt_sock_alloc(net, sock, &bnep_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sock->ops = &bnep_sock_ops; sock->state = SS_UNCONNECTED; bt_sock_link(&bnep_sk_list, sk); return 0; } static const struct net_proto_family bnep_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = bnep_sock_create }; int __init bnep_sock_init(void) { int err; err = proto_register(&bnep_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); if (err < 0) { BT_ERR("Can't register BNEP socket"); goto error; } err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create BNEP proc file"); bt_sock_unregister(BTPROTO_BNEP); goto error; } BT_INFO("BNEP socket layer initialized"); return 0; error: proto_unregister(&bnep_proto); return err; } void __exit bnep_sock_cleanup(void) { bt_procfs_cleanup(&init_net, "bnep"); bt_sock_unregister(BTPROTO_BNEP); proto_unregister(&bnep_proto); }
linux-master
net/bluetooth/bnep/sock.c
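Connections are created from user space through the raw BTPROTO_BNEP socket handled by do_bnep_sock_ioctl() above. The following user-space sketch is assumption-heavy and not taken from the kernel tree: the BNEPCONNADD value, the request layout and the PANU role value mirror what BlueZ's bnep.h ships; bnep_connect() and its l2cap_fd parameter are illustrative. Verify the constants against your installed headers before relying on this.

/* User-space sketch: issue BNEPCONNADD for an already-connected L2CAP
 * socket (BNEP PSM 15) and print the interface name the kernel chose. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#ifndef BTPROTO_BNEP
#define BTPROTO_BNEP	4			/* matches MODULE_ALIAS("bt-proto-4") */
#endif
#define BNEPCONNADD	_IOW('B', 200, int)	/* assumed, as in BlueZ bnep.h */

struct bnep_connadd_req {		/* layout consumed by BNEPCONNADD above */
	int		sock;		/* connected L2CAP socket (PSM 15)      */
	unsigned int	flags;
	unsigned short	role;		/* 0x1115 = PANU (assumed SDP value)    */
	char		device[16];	/* interface name, copied back to us    */
};

static int bnep_connect(int l2cap_fd)
{
	struct bnep_connadd_req req;
	int err, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_BNEP);

	if (ctl < 0)
		return -errno;

	memset(&req, 0, sizeof(req));
	req.sock = l2cap_fd;
	req.role = 0x1115;			/* PANU */
	strcpy(req.device, "bnep%d");		/* let the kernel pick a name */

	if (ioctl(ctl, BNEPCONNADD, &req) < 0) {
		err = -errno;
		close(ctl);
		return err;
	}

	printf("BNEP interface %s created\n", req.device);
	close(ctl);
	return 0;
}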
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by Clément Moreau <[email protected]> David Libault <[email protected]> Copyright (C) 2002 Maxim Krasnyansky <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/etherdevice.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "bnep.h" #define BNEP_TX_QUEUE_LEN 20 static int bnep_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int bnep_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } static void bnep_net_set_mc_list(struct net_device *dev) { #ifdef CONFIG_BT_BNEP_MC_FILTER struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; struct bnep_set_filter_req *r; struct sk_buff *skb; int size; BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { BT_ERR("%s Multicast list allocation failed", dev->name); return; } r = (void *) skb->data; __skb_put(skb, sizeof(*r)); r->type = BNEP_CONTROL; r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { u8 start[ETH_ALEN] = { 0x01 }; /* Request all addresses */ __skb_put_data(skb, start, ETH_ALEN); __skb_put_data(skb, dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { __skb_put_data(skb, dev->broadcast, ETH_ALEN); __skb_put_data(skb, dev->broadcast, ETH_ALEN); } /* FIXME: We should group addresses here. */ i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; __skb_put_data(skb, ha->addr, ETH_ALEN); __skb_put_data(skb, ha->addr, ETH_ALEN); i++; } r->len = htons(skb->len - len); } skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); #endif } static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) { BT_DBG("%s", dev->name); return 0; } static void bnep_net_timeout(struct net_device *dev, unsigned int txqueue) { BT_DBG("net_timeout"); netif_wake_queue(dev); } #ifdef CONFIG_BT_BNEP_MC_FILTER static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) return 1; return 0; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Determine ether protocol. Based on eth_type_trans. 
*/ static u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); if (proto >= ETH_P_802_3_MIN) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) return ETH_P_802_3; return ETH_P_802_2; } static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { if (proto >= f[i].start && proto <= f[i].end) return 0; } BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); return 1; } #endif static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; BT_DBG("skb %p, dev %p", skb, dev); #ifdef CONFIG_BT_BNEP_MC_FILTER if (bnep_net_mc_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER if (bnep_net_proto_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif /* * We cannot send L2CAP packets from here as we are potentially in a bh. * So we have to queue them and wake up session thread which is sleeping * on the sk_sleep(sk). */ netif_trans_update(dev); skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { BT_DBG("tx queue is full"); /* Stop queuing. * Session thread will do netif_wake_queue() */ netif_stop_queue(dev); } return NETDEV_TX_OK; } static const struct net_device_ops bnep_netdev_ops = { .ndo_open = bnep_net_open, .ndo_stop = bnep_net_close, .ndo_start_xmit = bnep_net_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = bnep_net_set_mc_list, .ndo_set_mac_address = bnep_net_set_mac_addr, .ndo_tx_timeout = bnep_net_timeout, }; void bnep_net_setup(struct net_device *dev) { eth_broadcast_addr(dev->broadcast); dev->addr_len = ETH_ALEN; ether_setup(dev); dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2; }
linux-master
net/bluetooth/bnep/netdev.c
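bnep_net_proto_filter() above drops any frame whose protocol does not fall into one of up to five [start, end] ranges that the peer programs into the session. The standalone sketch below is not kernel code: proto_filter_drops() and struct proto_range are local illustrative names, and MAX_PROTO_FILTERS mirrors BNEP_MAX_PROTO_FILTERS from bnep.h.

/* Standalone sketch: the same range match used by bnep_net_proto_filter(). */
#include <stdio.h>
#include <stdint.h>

#define MAX_PROTO_FILTERS 5		/* BNEP_MAX_PROTO_FILTERS */

struct proto_range {
	uint16_t start;
	uint16_t end;			/* end == 0 terminates the table */
};

/* Returns 1 when the frame must be dropped, 0 when it may pass. */
static int proto_filter_drops(uint16_t proto, const struct proto_range *f)
{
	int i;

	for (i = 0; i < MAX_PROTO_FILTERS && f[i].end; i++)
		if (proto >= f[i].start && proto <= f[i].end)
			return 0;
	return 1;
}

int main(void)
{
	/* Allow IPv4/ARP (0x0800-0x0806) and IPv6 (0x86dd) only. */
	struct proto_range filter[MAX_PROTO_FILTERS] = {
		{ 0x0800, 0x0806 },
		{ 0x86dd, 0x86dd },
	};

	printf("IPv4 dropped? %d\n", proto_filter_drops(0x0800, filter)); /* 0 */
	printf("IPv6 dropped? %d\n", proto_filter_drops(0x86dd, filter)); /* 0 */
	printf("IPX  dropped? %d\n", proto_filter_drops(0x8137, filter)); /* 1 */
	return 0;
}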
/* HIDP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2003-2004 Marcel Holtmann <[email protected]> Copyright (C) 2013 David Herrmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/kref.h> #include <linux/module.h> #include <linux/file.h> #include <linux/kthread.h> #include <linux/hidraw.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "hidp.h" #define VERSION "1.2" static DECLARE_RWSEM(hidp_session_sem); static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); static LIST_HEAD(hidp_session_list); static unsigned char hidp_keycode[256] = { 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88, 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73, 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135, 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94, 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115, 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140 }; static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static int hidp_session_probe(struct l2cap_conn *conn, struct l2cap_user *user); static void hidp_session_remove(struct l2cap_conn *conn, struct l2cap_user *user); static int hidp_session_thread(void *arg); static void hidp_session_terminate(struct hidp_session *s); static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) { u32 valid_flags = 0; memset(ci, 0, sizeof(*ci)); bacpy(&ci->bdaddr, &session->bdaddr); ci->flags = session->flags & valid_flags; ci->state = BT_CONNECTED; if (session->input) { ci->vendor = session->input->id.vendor; ci->product = session->input->id.product; ci->version = session->input->id.version; if (session->input->name) strscpy(ci->name, session->input->name, 128); else strscpy(ci->name, "HID Boot Device", 128); } else if (session->hid) { ci->vendor = session->hid->vendor; ci->product = session->hid->product; ci->version = session->hid->version; strscpy(ci->name, session->hid->name, 128); } } /* 
assemble skb, queue message on @transmit and wake up the session thread */ static int hidp_send_message(struct hidp_session *session, struct socket *sock, struct sk_buff_head *transmit, unsigned char hdr, const unsigned char *data, int size) { struct sk_buff *skb; struct sock *sk = sock->sk; int ret; BT_DBG("session %p data %p size %d", session, data, size); if (atomic_read(&session->terminate)) return -EIO; skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); return -ENOMEM; } skb_put_u8(skb, hdr); if (data && size > 0) { skb_put_data(skb, data, size); ret = size; } else { ret = 0; } skb_queue_tail(transmit, skb); wake_up_interruptible(sk_sleep(sk)); return ret; } static int hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, const unsigned char *data, int size) { return hidp_send_message(session, session->ctrl_sock, &session->ctrl_transmit, hdr, data, size); } static int hidp_send_intr_message(struct hidp_session *session, unsigned char hdr, const unsigned char *data, int size) { return hidp_send_message(session, session->intr_sock, &session->intr_transmit, hdr, data, size); } static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct hidp_session *session = input_get_drvdata(dev); unsigned char newleds; unsigned char hdr, data[2]; BT_DBG("session %p type %d code %d value %d", session, type, code, value); if (type != EV_LED) return -1; newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | (!!test_bit(LED_NUML, dev->led) << 0); if (session->leds == newleds) return 0; session->leds = newleds; hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT; data[0] = 0x01; data[1] = newleds; return hidp_send_intr_message(session, hdr, data, 2); } static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) { struct input_dev *dev = session->input; unsigned char *keys = session->keys; unsigned char *udata = skb->data + 1; signed char *sdata = skb->data + 1; int i, size = skb->len - 1; switch (skb->data[0]) { case 0x01: /* Keyboard report */ for (i = 0; i < 8; i++) input_report_key(dev, hidp_keycode[i + 224], (udata[0] >> i) & 1); /* If all the key codes have been set to 0x01, it means * too many keys were pressed at the same time. 
*/ if (!memcmp(udata + 2, hidp_mkeyspat, 6)) break; for (i = 2; i < 8; i++) { if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { if (hidp_keycode[keys[i]]) input_report_key(dev, hidp_keycode[keys[i]], 0); else BT_ERR("Unknown key (scancode %#x) released.", keys[i]); } if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { if (hidp_keycode[udata[i]]) input_report_key(dev, hidp_keycode[udata[i]], 1); else BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); } } memcpy(keys, udata, 8); break; case 0x02: /* Mouse report */ input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); input_report_rel(dev, REL_X, sdata[1]); input_report_rel(dev, REL_Y, sdata[2]); if (size > 3) input_report_rel(dev, REL_WHEEL, sdata[3]); break; } input_sync(dev); } static int hidp_get_raw_report(struct hid_device *hid, unsigned char report_number, unsigned char *data, size_t count, unsigned char report_type) { struct hidp_session *session = hid->driver_data; struct sk_buff *skb; size_t len; int numbered_reports = hid->report_enum[report_type].numbered; int ret; if (atomic_read(&session->terminate)) return -EIO; switch (report_type) { case HID_FEATURE_REPORT: report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE; break; case HID_INPUT_REPORT: report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT; break; case HID_OUTPUT_REPORT: report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT; break; default: return -EINVAL; } if (mutex_lock_interruptible(&session->report_mutex)) return -ERESTARTSYS; /* Set up our wait, and send the report request to the device. */ session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK; session->waiting_report_number = numbered_reports ? report_number : -1; set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); data[0] = report_number; ret = hidp_send_ctrl_message(session, report_type, data, 1); if (ret < 0) goto err; /* Wait for the return of the report. The returned report gets put in session->report_return. */ while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) && !atomic_read(&session->terminate)) { int res; res = wait_event_interruptible_timeout(session->report_queue, !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) || atomic_read(&session->terminate), 5*HZ); if (res == 0) { /* timeout */ ret = -EIO; goto err; } if (res < 0) { /* signal */ ret = -ERESTARTSYS; goto err; } } skb = session->report_return; if (skb) { len = skb->len < count ? skb->len : count; memcpy(data, skb->data, len); kfree_skb(skb); session->report_return = NULL; } else { /* Device returned a HANDSHAKE, indicating protocol error. 
*/ len = -EIO; } clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); mutex_unlock(&session->report_mutex); return len; err: clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); mutex_unlock(&session->report_mutex); return ret; } static int hidp_set_raw_report(struct hid_device *hid, unsigned char reportnum, unsigned char *data, size_t count, unsigned char report_type) { struct hidp_session *session = hid->driver_data; int ret; switch (report_type) { case HID_FEATURE_REPORT: report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; break; case HID_INPUT_REPORT: report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_INPUT; break; case HID_OUTPUT_REPORT: report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT; break; default: return -EINVAL; } if (mutex_lock_interruptible(&session->report_mutex)) return -ERESTARTSYS; /* Set up our wait, and send the report request to the device. */ data[0] = reportnum; set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); ret = hidp_send_ctrl_message(session, report_type, data, count); if (ret < 0) goto err; /* Wait for the ACK from the device. */ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) && !atomic_read(&session->terminate)) { int res; res = wait_event_interruptible_timeout(session->report_queue, !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) || atomic_read(&session->terminate), 10*HZ); if (res == 0) { /* timeout */ ret = -EIO; goto err; } if (res < 0) { /* signal */ ret = -ERESTARTSYS; goto err; } } if (!session->output_report_success) { ret = -EIO; goto err; } ret = count; err: clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); mutex_unlock(&session->report_mutex); return ret; } static int hidp_output_report(struct hid_device *hid, __u8 *data, size_t count) { struct hidp_session *session = hid->driver_data; return hidp_send_intr_message(session, HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT, data, count); } static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { switch (reqtype) { case HID_REQ_GET_REPORT: return hidp_get_raw_report(hid, reportnum, buf, len, rtype); case HID_REQ_SET_REPORT: return hidp_set_raw_report(hid, reportnum, buf, len, rtype); default: return -EIO; } } static void hidp_idle_timeout(struct timer_list *t) { struct hidp_session *session = from_timer(session, t, timer); /* The HIDP user-space API only contains calls to add and remove * devices. There is no way to forward events of any kind. Therefore, * we have to forcefully disconnect a device on idle-timeouts. This is * unfortunate and weird API design, but it is spec-compliant and * required for backwards-compatibility. Hence, on idle-timeout, we * signal driver-detach events, so poll() will be woken up with an * error-condition on both sockets. 
*/ session->intr_sock->sk->sk_err = EUNATCH; session->ctrl_sock->sk->sk_err = EUNATCH; wake_up_interruptible(sk_sleep(session->intr_sock->sk)); wake_up_interruptible(sk_sleep(session->ctrl_sock->sk)); hidp_session_terminate(session); } static void hidp_set_timer(struct hidp_session *session) { if (session->idle_to > 0) mod_timer(&session->timer, jiffies + HZ * session->idle_to); } static void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) del_timer_sync(&session->timer); } static void hidp_process_report(struct hidp_session *session, int type, const u8 *data, unsigned int len, int intr) { if (len > HID_MAX_BUFFER_SIZE) len = HID_MAX_BUFFER_SIZE; memcpy(session->input_buf, data, len); hid_input_report(session->hid, type, session->input_buf, len, intr); } static void hidp_process_handshake(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); session->output_report_success = 0; /* default condition */ switch (param) { case HIDP_HSHK_SUCCESSFUL: /* FIXME: Call into SET_ GET_ handlers here */ session->output_report_success = 1; break; case HIDP_HSHK_NOT_READY: case HIDP_HSHK_ERR_INVALID_REPORT_ID: case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: case HIDP_HSHK_ERR_INVALID_PARAMETER: if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) wake_up_interruptible(&session->report_queue); /* FIXME: Call into SET_ GET_ handlers here */ break; case HIDP_HSHK_ERR_UNKNOWN: break; case HIDP_HSHK_ERR_FATAL: /* Device requests a reboot, as this is the only way this error * can be recovered. */ hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0); break; default: hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); break; } /* Wake up the waiting thread. */ if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) wake_up_interruptible(&session->report_queue); } static void hidp_process_hid_control(struct hidp_session *session, unsigned char param) { BT_DBG("session %p param 0x%02x", session, param); if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) { /* Flush the transmit queues */ skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); hidp_session_terminate(session); } } /* Returns true if the passed-in skb should be freed by the caller. */ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb, unsigned char param) { int done_with_skb = 1; BT_DBG("session %p skb %p len %u param 0x%02x", session, skb, skb->len, param); switch (param) { case HIDP_DATA_RTYPE_INPUT: hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) hidp_process_report(session, HID_INPUT_REPORT, skb->data, skb->len, 0); break; case HIDP_DATA_RTYPE_OTHER: case HIDP_DATA_RTYPE_OUPUT: case HIDP_DATA_RTYPE_FEATURE: break; default: hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); } if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) && param == session->waiting_report_type) { if (session->waiting_report_number < 0 || session->waiting_report_number == skb->data[0]) { /* hidp_get_raw_report() is waiting on this report. 
*/ session->report_return = skb; done_with_skb = 0; clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); wake_up_interruptible(&session->report_queue); } } return done_with_skb; } static void hidp_recv_ctrl_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr, type, param; int free_skb = 1; BT_DBG("session %p skb %p len %u", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); type = hdr & HIDP_HEADER_TRANS_MASK; param = hdr & HIDP_HEADER_PARAM_MASK; switch (type) { case HIDP_TRANS_HANDSHAKE: hidp_process_handshake(session, param); break; case HIDP_TRANS_HID_CONTROL: hidp_process_hid_control(session, param); break; case HIDP_TRANS_DATA: free_skb = hidp_process_data(session, skb, param); break; default: hidp_send_ctrl_message(session, HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0); break; } if (free_skb) kfree_skb(skb); } static void hidp_recv_intr_frame(struct hidp_session *session, struct sk_buff *skb) { unsigned char hdr; BT_DBG("session %p skb %p len %u", session, skb, skb->len); hdr = skb->data[0]; skb_pull(skb, 1); if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) { hidp_set_timer(session); if (session->input) hidp_input_report(session, skb); if (session->hid) { hidp_process_report(session, HID_INPUT_REPORT, skb->data, skb->len, 1); BT_DBG("report len %d", skb->len); } } else { BT_DBG("Unsupported protocol header 0x%02x", hdr); } kfree_skb(skb); } static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) { struct kvec iv = { data, len }; struct msghdr msg; BT_DBG("sock %p data %p len %d", sock, data, len); if (!len) return 0; memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, &iv, 1, len); } /* dequeue message from @transmit and send via @sock */ static void hidp_process_transmit(struct hidp_session *session, struct sk_buff_head *transmit, struct socket *sock) { struct sk_buff *skb; int ret; BT_DBG("session %p", session); while ((skb = skb_dequeue(transmit))) { ret = hidp_send_frame(sock, skb->data, skb->len); if (ret == -EAGAIN) { skb_queue_head(transmit, skb); break; } else if (ret < 0) { hidp_session_terminate(session); kfree_skb(skb); break; } hidp_set_timer(session); kfree_skb(skb); } } static int hidp_setup_input(struct hidp_session *session, const struct hidp_connadd_req *req) { struct input_dev *input; int i; input = input_allocate_device(); if (!input) return -ENOMEM; session->input = input; input_set_drvdata(input, session); input->name = "Bluetooth HID Boot Protocol Device"; input->id.bustype = BUS_BLUETOOTH; input->id.vendor = req->vendor; input->id.product = req->product; input->id.version = req->version; if (req->subclass & 0x40) { set_bit(EV_KEY, input->evbit); set_bit(EV_LED, input->evbit); set_bit(EV_REP, input->evbit); set_bit(LED_NUML, input->ledbit); set_bit(LED_CAPSL, input->ledbit); set_bit(LED_SCROLLL, input->ledbit); set_bit(LED_COMPOSE, input->ledbit); set_bit(LED_KANA, input->ledbit); for (i = 0; i < sizeof(hidp_keycode); i++) set_bit(hidp_keycode[i], input->keybit); clear_bit(0, input->keybit); } if (req->subclass & 0x80) { input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); input->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); input->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); input->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA); input->relbit[0] |= BIT_MASK(REL_WHEEL); } input->dev.parent = &session->conn->hcon->dev; input->event = hidp_input_event; return 0; } static int hidp_open(struct 
hid_device *hid) { return 0; } static void hidp_close(struct hid_device *hid) { } static int hidp_parse(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; return hid_parse_report(session->hid, session->rd_data, session->rd_size); } static int hidp_start(struct hid_device *hid) { return 0; } static void hidp_stop(struct hid_device *hid) { struct hidp_session *session = hid->driver_data; skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); hid->claimed = 0; } static const struct hid_ll_driver hidp_hid_driver = { .parse = hidp_parse, .start = hidp_start, .stop = hidp_stop, .open = hidp_open, .close = hidp_close, .raw_request = hidp_raw_request, .output_report = hidp_output_report, }; /* This function sets up the hid device. It does not add it to the HID system. That is done in hidp_add_connection(). */ static int hidp_setup_hid(struct hidp_session *session, const struct hidp_connadd_req *req) { struct hid_device *hid; int err; session->rd_data = memdup_user(req->rd_data, req->rd_size); if (IS_ERR(session->rd_data)) return PTR_ERR(session->rd_data); session->rd_size = req->rd_size; hid = hid_allocate_device(); if (IS_ERR(hid)) { err = PTR_ERR(hid); goto fault; } session->hid = hid; hid->driver_data = session; hid->bus = BUS_BLUETOOTH; hid->vendor = req->vendor; hid->product = req->product; hid->version = req->version; hid->country = req->country; strscpy(hid->name, req->name, sizeof(hid->name)); snprintf(hid->phys, sizeof(hid->phys), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->src); /* NOTE: Some device modules depend on the dst address being stored in * uniq. Please be aware of this before making changes to this behavior. */ snprintf(hid->uniq, sizeof(hid->uniq), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->dst); hid->dev.parent = &session->conn->hcon->dev; hid->ll_driver = &hidp_hid_driver; /* True if device is blocked in drivers/hid/hid-quirks.c */ if (hid_ignore(hid)) { hid_destroy_device(session->hid); session->hid = NULL; return -ENODEV; } return 0; fault: kfree(session->rd_data); session->rd_data = NULL; return err; } /* initialize session devices */ static int hidp_session_dev_init(struct hidp_session *session, const struct hidp_connadd_req *req) { int ret; if (req->rd_size > 0) { ret = hidp_setup_hid(session, req); if (ret && ret != -ENODEV) return ret; } if (!session->hid) { ret = hidp_setup_input(session, req); if (ret < 0) return ret; } return 0; } /* destroy session devices */ static void hidp_session_dev_destroy(struct hidp_session *session) { if (session->hid) put_device(&session->hid->dev); else if (session->input) input_put_device(session->input); kfree(session->rd_data); session->rd_data = NULL; } /* add HID/input devices to their underlying bus systems */ static int hidp_session_dev_add(struct hidp_session *session) { int ret; /* Both HID and input systems drop a ref-count when unregistering the * device but they don't take a ref-count when registering them. Work * around this by explicitly taking a refcount during registration * which is dropped automatically by unregistering the devices. 
*/ if (session->hid) { ret = hid_add_device(session->hid); if (ret) return ret; get_device(&session->hid->dev); } else if (session->input) { ret = input_register_device(session->input); if (ret) return ret; input_get_device(session->input); } return 0; } /* remove HID/input devices from their bus systems */ static void hidp_session_dev_del(struct hidp_session *session) { if (session->hid) hid_destroy_device(session->hid); else if (session->input) input_unregister_device(session->input); } /* * Asynchronous device registration * HID device drivers might want to perform I/O during initialization to * detect device types. Therefore, call device registration in a separate * worker so the HIDP thread can schedule I/O operations. * Note that this must be called after the worker thread was initialized * successfully. This will then add the devices and increase session state * on success, otherwise it will terminate the session thread. */ static void hidp_session_dev_work(struct work_struct *work) { struct hidp_session *session = container_of(work, struct hidp_session, dev_init); int ret; ret = hidp_session_dev_add(session); if (!ret) atomic_inc(&session->state); else hidp_session_terminate(session); } /* * Create new session object * Allocate session object, initialize static fields, copy input data into the * object and take a reference to all sub-objects. * This returns 0 on success and puts a pointer to the new session object in * \out. Otherwise, an error code is returned. * The new session object has an initial ref-count of 1. */ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, struct socket *ctrl_sock, struct socket *intr_sock, const struct hidp_connadd_req *req, struct l2cap_conn *conn) { struct hidp_session *session; int ret; struct bt_sock *ctrl, *intr; ctrl = bt_sk(ctrl_sock->sk); intr = bt_sk(intr_sock->sk); session = kzalloc(sizeof(*session), GFP_KERNEL); if (!session) return -ENOMEM; /* object and runtime management */ kref_init(&session->ref); atomic_set(&session->state, HIDP_SESSION_IDLING); init_waitqueue_head(&session->state_queue); session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID); /* connection management */ bacpy(&session->bdaddr, bdaddr); session->conn = l2cap_conn_get(conn); session->user.probe = hidp_session_probe; session->user.remove = hidp_session_remove; INIT_LIST_HEAD(&session->user.list); session->ctrl_sock = ctrl_sock; session->intr_sock = intr_sock; skb_queue_head_init(&session->ctrl_transmit); skb_queue_head_init(&session->intr_transmit); session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl)->chan->omtu, l2cap_pi(ctrl)->chan->imtu); session->intr_mtu = min_t(uint, l2cap_pi(intr)->chan->omtu, l2cap_pi(intr)->chan->imtu); session->idle_to = req->idle_to; /* device management */ INIT_WORK(&session->dev_init, hidp_session_dev_work); timer_setup(&session->timer, hidp_idle_timeout, 0); /* session data */ mutex_init(&session->report_mutex); init_waitqueue_head(&session->report_queue); ret = hidp_session_dev_init(session, req); if (ret) goto err_free; get_file(session->intr_sock->file); get_file(session->ctrl_sock->file); *out = session; return 0; err_free: l2cap_conn_put(session->conn); kfree(session); return ret; } /* increase ref-count of the given session by one */ static void hidp_session_get(struct hidp_session *session) { kref_get(&session->ref); } /* release callback */ static void session_free(struct kref *ref) { struct hidp_session *session = container_of(ref, struct hidp_session, ref); hidp_session_dev_destroy(session); 
skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); fput(session->intr_sock->file); fput(session->ctrl_sock->file); l2cap_conn_put(session->conn); kfree(session); } /* decrease ref-count of the given session by one */ static void hidp_session_put(struct hidp_session *session) { kref_put(&session->ref, session_free); } /* * Search the list of active sessions for a session with target address * \bdaddr. You must hold at least a read-lock on \hidp_session_sem. As long as * you do not release this lock, the session objects cannot vanish and you can * safely take a reference to the session yourself. */ static struct hidp_session *__hidp_session_find(const bdaddr_t *bdaddr) { struct hidp_session *session; list_for_each_entry(session, &hidp_session_list, list) { if (!bacmp(bdaddr, &session->bdaddr)) return session; } return NULL; } /* * Same as __hidp_session_find() but no locks must be held. This also takes a * reference of the returned session (if non-NULL) so you must drop this * reference if you no longer use the object. */ static struct hidp_session *hidp_session_find(const bdaddr_t *bdaddr) { struct hidp_session *session; down_read(&hidp_session_sem); session = __hidp_session_find(bdaddr); if (session) hidp_session_get(session); up_read(&hidp_session_sem); return session; } /* * Start session synchronously * This starts a session thread and waits until initialization * is done or returns an error if it couldn't be started. * If this returns 0 the session thread is up and running. You must call * hipd_session_stop_sync() before deleting any runtime resources. */ static int hidp_session_start_sync(struct hidp_session *session) { unsigned int vendor, product; if (session->hid) { vendor = session->hid->vendor; product = session->hid->product; } else if (session->input) { vendor = session->input->id.vendor; product = session->input->id.product; } else { vendor = 0x0000; product = 0x0000; } session->task = kthread_run(hidp_session_thread, session, "khidpd_%04x%04x", vendor, product); if (IS_ERR(session->task)) return PTR_ERR(session->task); while (atomic_read(&session->state) <= HIDP_SESSION_IDLING) wait_event(session->state_queue, atomic_read(&session->state) > HIDP_SESSION_IDLING); return 0; } /* * Terminate session thread * Wake up session thread and notify it to stop. This is asynchronous and * returns immediately. Call this whenever a runtime error occurs and you want * the session to stop. * Note: wake_up_interruptible() performs any necessary memory-barriers for us. */ static void hidp_session_terminate(struct hidp_session *session) { atomic_inc(&session->terminate); /* * See the comment preceding the call to wait_woken() * in hidp_session_run(). */ wake_up_interruptible(&hidp_session_wq); } /* * Probe HIDP session * This is called from the l2cap_conn core when our l2cap_user object is bound * to the hci-connection. We get the session via the \user object and can now * start the session thread, link it into the global session list and * schedule HID/input device registration. * The global session-list owns its own reference to the session object so you * can drop your own reference after registering the l2cap_user object. 
*/ static int hidp_session_probe(struct l2cap_conn *conn, struct l2cap_user *user) { struct hidp_session *session = container_of(user, struct hidp_session, user); struct hidp_session *s; int ret; down_write(&hidp_session_sem); /* check that no other session for this device exists */ s = __hidp_session_find(&session->bdaddr); if (s) { ret = -EEXIST; goto out_unlock; } if (session->input) { ret = hidp_session_dev_add(session); if (ret) goto out_unlock; } ret = hidp_session_start_sync(session); if (ret) goto out_del; /* HID device registration is async to allow I/O during probe */ if (session->input) atomic_inc(&session->state); else schedule_work(&session->dev_init); hidp_session_get(session); list_add(&session->list, &hidp_session_list); ret = 0; goto out_unlock; out_del: if (session->input) hidp_session_dev_del(session); out_unlock: up_write(&hidp_session_sem); return ret; } /* * Remove HIDP session * Called from the l2cap_conn core when either we explicitly unregistered * the l2cap_user object or if the underlying connection is shut down. * We signal the hidp-session thread to shut down, unregister the HID/input * devices and unlink the session from the global list. * This drops the reference to the session that is owned by the global * session-list. * Note: We _must_ not synchronosly wait for the session-thread to shut down. * This is, because the session-thread might be waiting for an HCI lock that is * held while we are called. Therefore, we only unregister the devices and * notify the session-thread to terminate. The thread itself owns a reference * to the session object so it can safely shut down. */ static void hidp_session_remove(struct l2cap_conn *conn, struct l2cap_user *user) { struct hidp_session *session = container_of(user, struct hidp_session, user); down_write(&hidp_session_sem); hidp_session_terminate(session); cancel_work_sync(&session->dev_init); if (session->input || atomic_read(&session->state) > HIDP_SESSION_PREPARING) hidp_session_dev_del(session); list_del(&session->list); up_write(&hidp_session_sem); hidp_session_put(session); } /* * Session Worker * This performs the actual main-loop of the HIDP worker. We first check * whether the underlying connection is still alive, then parse all pending * messages and finally send all outstanding messages. */ static void hidp_session_run(struct hidp_session *session) { struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *intr_sk = session->intr_sock->sk; struct sk_buff *skb; DEFINE_WAIT_FUNC(wait, woken_wake_function); add_wait_queue(&hidp_session_wq, &wait); for (;;) { /* * This thread can be woken up two ways: * - You call hidp_session_terminate() which sets the * session->terminate flag and wakes this thread up. * - Via modifying the socket state of ctrl/intr_sock. This * thread is woken up by ->sk_state_changed(). 
*/ if (atomic_read(&session->terminate)) break; if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) break; /* parse incoming intr-skbs */ while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) hidp_recv_intr_frame(session, skb); else kfree_skb(skb); } /* send pending intr-skbs */ hidp_process_transmit(session, &session->intr_transmit, session->intr_sock); /* parse incoming ctrl-skbs */ while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) hidp_recv_ctrl_frame(session, skb); else kfree_skb(skb); } /* send pending ctrl-skbs */ hidp_process_transmit(session, &session->ctrl_transmit, session->ctrl_sock); /* * wait_woken() performs the necessary memory barriers * for us; see the header comment for this primitive. */ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&hidp_session_wq, &wait); atomic_inc(&session->terminate); } static int hidp_session_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { wake_up_interruptible(&hidp_session_wq); return false; } /* * HIDP session thread * This thread runs the I/O for a single HIDP session. Startup is synchronous * which allows us to take references to ourself here instead of doing that in * the caller. * When we are ready to run we notify the caller and call hidp_session_run(). */ static int hidp_session_thread(void *arg) { struct hidp_session *session = arg; DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); BT_DBG("session %p", session); /* initialize runtime environment */ hidp_session_get(session); __module_get(THIS_MODULE); set_user_nice(current, -15); hidp_set_timer(session); add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); /* This memory barrier is paired with wq_has_sleeper(). See * sock_poll_wait() for more information why this is needed. */ smp_mb__before_atomic(); /* notify synchronous startup that we're ready */ atomic_inc(&session->state); wake_up(&session->state_queue); /* run session */ hidp_session_run(session); /* cleanup runtime environment */ remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); wake_up_interruptible(&session->report_queue); hidp_del_timer(session); /* * If we stopped ourself due to any internal signal, we should try to * unregister our own session here to avoid having it linger until the * parent l2cap_conn dies or user-space cleans it up. * This does not deadlock as we don't do any synchronous shutdown. * Instead, this call has the same semantics as if user-space tried to * delete the session. 
*/ l2cap_unregister_user(session->conn, &session->user); hidp_session_put(session); module_put_and_kthread_exit(0); return 0; } static int hidp_verify_sockets(struct socket *ctrl_sock, struct socket *intr_sock) { struct l2cap_chan *ctrl_chan, *intr_chan; struct bt_sock *ctrl, *intr; struct hidp_session *session; if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock)) return -EINVAL; ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan; intr_chan = l2cap_pi(intr_sock->sk)->chan; if (bacmp(&ctrl_chan->src, &intr_chan->src) || bacmp(&ctrl_chan->dst, &intr_chan->dst)) return -ENOTUNIQ; ctrl = bt_sk(ctrl_sock->sk); intr = bt_sk(intr_sock->sk); if (ctrl->sk.sk_state != BT_CONNECTED || intr->sk.sk_state != BT_CONNECTED) return -EBADFD; /* early session check, we check again during session registration */ session = hidp_session_find(&ctrl_chan->dst); if (session) { hidp_session_put(session); return -EEXIST; } return 0; } int hidp_connection_add(const struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) { u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG) | BIT(HIDP_BOOT_PROTOCOL_MODE); struct hidp_session *session; struct l2cap_conn *conn; struct l2cap_chan *chan; int ret; ret = hidp_verify_sockets(ctrl_sock, intr_sock); if (ret) return ret; if (req->flags & ~valid_flags) return -EINVAL; chan = l2cap_pi(ctrl_sock->sk)->chan; conn = NULL; l2cap_chan_lock(chan); if (chan->conn) conn = l2cap_conn_get(chan->conn); l2cap_chan_unlock(chan); if (!conn) return -EBADFD; ret = hidp_session_new(&session, &chan->dst, ctrl_sock, intr_sock, req, conn); if (ret) goto out_conn; ret = l2cap_register_user(conn, &session->user); if (ret) goto out_session; ret = 0; out_session: hidp_session_put(session); out_conn: l2cap_conn_put(conn); return ret; } int hidp_connection_del(struct hidp_conndel_req *req) { u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG); struct hidp_session *session; if (req->flags & ~valid_flags) return -EINVAL; session = hidp_session_find(&req->bdaddr); if (!session) return -ENOENT; if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG)) hidp_send_ctrl_message(session, HIDP_TRANS_HID_CONTROL | HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, NULL, 0); else l2cap_unregister_user(session->conn, &session->user); hidp_session_put(session); return 0; } int hidp_get_connlist(struct hidp_connlist_req *req) { struct hidp_session *session; int err = 0, n = 0; BT_DBG(""); down_read(&hidp_session_sem); list_for_each_entry(session, &hidp_session_list, list) { struct hidp_conninfo ci; hidp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { err = -EFAULT; break; } if (++n >= req->cnum) break; req->ci++; } req->cnum = n; up_read(&hidp_session_sem); return err; } int hidp_get_conninfo(struct hidp_conninfo *ci) { struct hidp_session *session; session = hidp_session_find(&ci->bdaddr); if (session) { hidp_copy_session(session, ci); hidp_session_put(session); } return session ? 0 : -ENOENT; } static int __init hidp_init(void) { BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); return hidp_init_sockets(); } static void __exit hidp_exit(void) { hidp_cleanup_sockets(); } module_init(hidp_init); module_exit(hidp_exit); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_AUTHOR("David Herrmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-6");
linux-master
net/bluetooth/hidp/core.c
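Every HIDP message handled by hidp_send_message() and hidp_recv_ctrl_frame() above starts with a single header byte: the transaction type sits in the upper bits and the parameter (for DATA frames, the report type) in the lower bits. The standalone sketch below is not kernel code; the macro names are local, with numeric values as in hidp.h and the HID profile specification.

/* Standalone sketch: build and decode the 1-byte HIDP header. */
#include <stdio.h>
#include <stdint.h>

#define TRANS_MASK	0xf0	/* HIDP_HEADER_TRANS_MASK */
#define PARAM_MASK	0x0f	/* HIDP_HEADER_PARAM_MASK */

#define TRANS_DATA	0xa0	/* HIDP_TRANS_DATA        */
#define RTYPE_INPUT	0x01	/* HIDP_DATA_RTYPE_INPUT  */
#define RTYPE_OUTPUT	0x02	/* HIDP_DATA_RTYPE_OUPUT  */

int main(void)
{
	/* hidp_send_message() prepends one such byte with skb_put_u8()
	 * before the report payload. */
	uint8_t led_report_hdr = TRANS_DATA | RTYPE_OUTPUT;	/* 0xa2 */

	uint8_t rx = TRANS_DATA | RTYPE_INPUT;	/* incoming input report */
	printf("tx header: 0x%02x\n", led_report_hdr);
	printf("rx type 0x%02x param 0x%02x\n",
	       rx & TRANS_MASK, rx & PARAM_MASK);	/* DATA / INPUT */
	return 0;
}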
/* HIDP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2003-2004 Marcel Holtmann <[email protected]> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/compat.h> #include <linux/export.h> #include <linux/file.h> #include "hidp.h" static struct bt_sock_list hidp_sk_list = { .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock) }; static int hidp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; bt_sock_unlink(&hidp_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int do_hidp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) { struct hidp_connadd_req ca; struct hidp_conndel_req cd; struct hidp_connlist_req cl; struct hidp_conninfo ci; struct socket *csock; struct socket *isock; int err; BT_DBG("cmd %x arg %p", cmd, argp); switch (cmd) { case HIDPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; csock = sockfd_lookup(ca.ctrl_sock, &err); if (!csock) return err; isock = sockfd_lookup(ca.intr_sock, &err); if (!isock) { sockfd_put(csock); return err; } ca.name[sizeof(ca.name)-1] = 0; err = hidp_connection_add(&ca, csock, isock); if (!err && copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; sockfd_put(csock); sockfd_put(isock); return err; case HIDPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return hidp_connection_del(&cd); case HIDPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = hidp_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case HIDPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = hidp_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; } return -EINVAL; } static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return do_hidp_sock_ioctl(sock, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT struct compat_hidp_connadd_req { int ctrl_sock; /* Connected control socket */ int intr_sock; /* Connected interrupt socket */ __u16 parser; __u16 rd_size; compat_uptr_t rd_data; __u8 country; __u8 subclass; __u16 vendor; __u16 product; __u16 version; __u32 flags; __u32 idle_to; char name[128]; }; static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); int err; if (cmd == HIDPGETCONNLIST) { struct hidp_connlist_req cl; u32 __user *p = argp; u32 uci; if (get_user(cl.cnum, p) || get_user(uci, p + 1)) return -EFAULT; 
cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return -EINVAL; err = hidp_get_connlist(&cl); if (!err && put_user(cl.cnum, p)) err = -EFAULT; return err; } else if (cmd == HIDPCONNADD) { struct compat_hidp_connadd_req ca32; struct hidp_connadd_req ca; struct socket *csock; struct socket *isock; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&ca32, (void __user *) arg, sizeof(ca32))) return -EFAULT; ca.ctrl_sock = ca32.ctrl_sock; ca.intr_sock = ca32.intr_sock; ca.parser = ca32.parser; ca.rd_size = ca32.rd_size; ca.rd_data = compat_ptr(ca32.rd_data); ca.country = ca32.country; ca.subclass = ca32.subclass; ca.vendor = ca32.vendor; ca.product = ca32.product; ca.version = ca32.version; ca.flags = ca32.flags; ca.idle_to = ca32.idle_to; ca32.name[sizeof(ca32.name) - 1] = '\0'; memcpy(ca.name, ca32.name, 128); csock = sockfd_lookup(ca.ctrl_sock, &err); if (!csock) return err; isock = sockfd_lookup(ca.intr_sock, &err); if (!isock) { sockfd_put(csock); return err; } err = hidp_connection_add(&ca, csock, isock); if (!err && copy_to_user(argp, &ca32, sizeof(ca32))) err = -EFAULT; sockfd_put(csock); sockfd_put(isock); return err; } return hidp_sock_ioctl(sock, cmd, arg); } #endif static const struct proto_ops hidp_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = hidp_sock_release, .ioctl = hidp_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hidp_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto hidp_proto = { .name = "HIDP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = bt_sock_alloc(net, sock, &hidp_proto, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sock->ops = &hidp_sock_ops; sock->state = SS_UNCONNECTED; bt_sock_link(&hidp_sk_list, sk); return 0; } static const struct net_proto_family hidp_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = hidp_sock_create }; int __init hidp_init_sockets(void) { int err; err = proto_register(&hidp_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); if (err < 0) { BT_ERR("Can't register HIDP socket"); goto error; } err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create HIDP proc file"); bt_sock_unregister(BTPROTO_HIDP); goto error; } BT_INFO("HIDP socket layer initialized"); return 0; error: proto_unregister(&hidp_proto); return err; } void __exit hidp_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "hidp"); bt_sock_unregister(BTPROTO_HIDP); proto_unregister(&hidp_proto); }
linux-master
net/bluetooth/hidp/sock.c
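A brief illustrative sketch, not part of the kernel file above: how user space might drive the HIDPCONNADD ioctl that hidp_sock_ioctl() implements. The BlueZ-style header names and the pre-connected L2CAP control/interrupt sockets are assumptions of this example.

/*
 * Hypothetical user-space sketch (not from the kernel sources): a HID proxy
 * daemon driving HIDPCONNADD on a raw BTPROTO_HIDP socket. Header names and
 * the already-connected L2CAP fds (PSM 0x11 control, PSM 0x13 interrupt) are
 * assumptions here; a real daemon would also pass the HID report descriptor
 * via rd_data/rd_size.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hidp.h>

static int hidp_add_connection(int ctrl_sk, int intr_sk)
{
	struct hidp_connadd_req req;
	int hidp_sk;

	/* Raw HIDP control socket; hidp_sock_create() only accepts SOCK_RAW */
	hidp_sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HIDP);
	if (hidp_sk < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.ctrl_sock = ctrl_sk;	/* connected L2CAP control channel */
	req.intr_sock = intr_sk;	/* connected L2CAP interrupt channel */
	req.idle_to = 30 * 60;		/* idle timeout in seconds (example value) */
	strncpy(req.name, "Example HID device", sizeof(req.name) - 1);

	/* Requires CAP_NET_ADMIN; on success the kernel copies the request back */
	if (ioctl(hidp_sk, HIDPCONNADD, &req) < 0) {
		perror("HIDPCONNADD");
		return -1;
	}
	return 0;
}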
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/switchdev/switchdev.c - Switch device API * Copyright (c) 2014-2015 Jiri Pirko <[email protected]> * Copyright (c) 2014-2015 Scott Feldman <[email protected]> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/rtnetlink.h> #include <net/switchdev.h> static LIST_HEAD(deferred); static DEFINE_SPINLOCK(deferred_lock); typedef void switchdev_deferred_func_t(struct net_device *dev, const void *data); struct switchdev_deferred_item { struct list_head list; struct net_device *dev; netdevice_tracker dev_tracker; switchdev_deferred_func_t *func; unsigned long data[]; }; static struct switchdev_deferred_item *switchdev_deferred_dequeue(void) { struct switchdev_deferred_item *dfitem; spin_lock_bh(&deferred_lock); if (list_empty(&deferred)) { dfitem = NULL; goto unlock; } dfitem = list_first_entry(&deferred, struct switchdev_deferred_item, list); list_del(&dfitem->list); unlock: spin_unlock_bh(&deferred_lock); return dfitem; } /** * switchdev_deferred_process - Process ops in deferred queue * * Called to flush the ops currently queued in deferred ops queue. * rtnl_lock must be held. */ void switchdev_deferred_process(void) { struct switchdev_deferred_item *dfitem; ASSERT_RTNL(); while ((dfitem = switchdev_deferred_dequeue())) { dfitem->func(dfitem->dev, dfitem->data); netdev_put(dfitem->dev, &dfitem->dev_tracker); kfree(dfitem); } } EXPORT_SYMBOL_GPL(switchdev_deferred_process); static void switchdev_deferred_process_work(struct work_struct *work) { rtnl_lock(); switchdev_deferred_process(); rtnl_unlock(); } static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work); static int switchdev_deferred_enqueue(struct net_device *dev, const void *data, size_t data_len, switchdev_deferred_func_t *func) { struct switchdev_deferred_item *dfitem; dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC); if (!dfitem) return -ENOMEM; dfitem->dev = dev; dfitem->func = func; memcpy(dfitem->data, data, data_len); netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC); spin_lock_bh(&deferred_lock); list_add_tail(&dfitem->list, &deferred); spin_unlock_bh(&deferred_lock); schedule_work(&deferred_process_work); return 0; } static int switchdev_port_attr_notify(enum switchdev_notifier_type nt, struct net_device *dev, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { int err; int rc; struct switchdev_notifier_port_attr_info attr_info = { .attr = attr, .handled = false, }; rc = call_switchdev_blocking_notifiers(nt, dev, &attr_info.info, extack); err = notifier_to_errno(rc); if (err) { WARN_ON(!attr_info.handled); return err; } if (!attr_info.handled) return -EOPNOTSUPP; return 0; } static int switchdev_port_attr_set_now(struct net_device *dev, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr, extack); } static void switchdev_port_attr_set_deferred(struct net_device *dev, const void *data) { const struct switchdev_attr *attr = data; int err; err = switchdev_port_attr_set_now(dev, attr, NULL); if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", err, attr->id); if (attr->complete) attr->complete(dev, err, attr->complete_priv); } static int 
switchdev_port_attr_set_defer(struct net_device *dev, const struct switchdev_attr *attr) { return switchdev_deferred_enqueue(dev, attr, sizeof(*attr), switchdev_port_attr_set_deferred); } /** * switchdev_port_attr_set - Set port attribute * * @dev: port device * @attr: attribute to set * @extack: netlink extended ack, for error message propagation * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set. */ int switchdev_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct netlink_ext_ack *extack) { if (attr->flags & SWITCHDEV_F_DEFER) return switchdev_port_attr_set_defer(dev, attr); ASSERT_RTNL(); return switchdev_port_attr_set_now(dev, attr, extack); } EXPORT_SYMBOL_GPL(switchdev_port_attr_set); static size_t switchdev_obj_size(const struct switchdev_obj *obj) { switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: return sizeof(struct switchdev_obj_port_vlan); case SWITCHDEV_OBJ_ID_PORT_MDB: return sizeof(struct switchdev_obj_port_mdb); case SWITCHDEV_OBJ_ID_HOST_MDB: return sizeof(struct switchdev_obj_port_mdb); default: BUG(); } return 0; } static int switchdev_port_obj_notify(enum switchdev_notifier_type nt, struct net_device *dev, const struct switchdev_obj *obj, struct netlink_ext_ack *extack) { int rc; int err; struct switchdev_notifier_port_obj_info obj_info = { .obj = obj, .handled = false, }; rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack); err = notifier_to_errno(rc); if (err) { WARN_ON(!obj_info.handled); return err; } if (!obj_info.handled) return -EOPNOTSUPP; return 0; } static void switchdev_port_obj_add_deferred(struct net_device *dev, const void *data) { const struct switchdev_obj *obj = data; int err; ASSERT_RTNL(); err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, dev, obj, NULL); if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", err, obj->id); if (obj->complete) obj->complete(dev, err, obj->complete_priv); } static int switchdev_port_obj_add_defer(struct net_device *dev, const struct switchdev_obj *obj) { return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), switchdev_port_obj_add_deferred); } /** * switchdev_port_obj_add - Add port object * * @dev: port device * @obj: object to add * @extack: netlink extended ack * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set. 
*/ int switchdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct netlink_ext_ack *extack) { if (obj->flags & SWITCHDEV_F_DEFER) return switchdev_port_obj_add_defer(dev, obj); ASSERT_RTNL(); return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD, dev, obj, extack); } EXPORT_SYMBOL_GPL(switchdev_port_obj_add); static int switchdev_port_obj_del_now(struct net_device *dev, const struct switchdev_obj *obj) { return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL, dev, obj, NULL); } static void switchdev_port_obj_del_deferred(struct net_device *dev, const void *data) { const struct switchdev_obj *obj = data; int err; err = switchdev_port_obj_del_now(dev, obj); if (err && err != -EOPNOTSUPP) netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", err, obj->id); if (obj->complete) obj->complete(dev, err, obj->complete_priv); } static int switchdev_port_obj_del_defer(struct net_device *dev, const struct switchdev_obj *obj) { return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj), switchdev_port_obj_del_deferred); } /** * switchdev_port_obj_del - Delete port object * * @dev: port device * @obj: object to delete * * rtnl_lock must be held and must not be in atomic section, * in case SWITCHDEV_F_DEFER flag is not set. */ int switchdev_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) { if (obj->flags & SWITCHDEV_F_DEFER) return switchdev_port_obj_del_defer(dev, obj); ASSERT_RTNL(); return switchdev_port_obj_del_now(dev, obj); } EXPORT_SYMBOL_GPL(switchdev_port_obj_del); static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); /** * register_switchdev_notifier - Register notifier * @nb: notifier_block * * Register switch device notifier. */ int register_switchdev_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&switchdev_notif_chain, nb); } EXPORT_SYMBOL_GPL(register_switchdev_notifier); /** * unregister_switchdev_notifier - Unregister notifier * @nb: notifier_block * * Unregister switch device notifier. */ int unregister_switchdev_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb); } EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); /** * call_switchdev_notifiers - Call notifiers * @val: value passed unmodified to notifier function * @dev: port device * @info: notifier information data * @extack: netlink extended ack * Call all network notifier blocks. 
*/ int call_switchdev_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info, struct netlink_ext_ack *extack) { info->dev = dev; info->extack = extack; return atomic_notifier_call_chain(&switchdev_notif_chain, val, info); } EXPORT_SYMBOL_GPL(call_switchdev_notifiers); int register_switchdev_blocking_notifier(struct notifier_block *nb) { struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; return blocking_notifier_chain_register(chain, nb); } EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); int unregister_switchdev_blocking_notifier(struct notifier_block *nb) { struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; return blocking_notifier_chain_unregister(chain, nb); } EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info, struct netlink_ext_ack *extack) { info->dev = dev; info->extack = extack; return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, val, info); } EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); struct switchdev_nested_priv { bool (*check_cb)(const struct net_device *dev); bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev); const struct net_device *dev; struct net_device *lower_dev; }; static int switchdev_lower_dev_walk(struct net_device *lower_dev, struct netdev_nested_priv *priv) { struct switchdev_nested_priv *switchdev_priv = priv->data; bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev); bool (*check_cb)(const struct net_device *dev); const struct net_device *dev; check_cb = switchdev_priv->check_cb; foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb; dev = switchdev_priv->dev; if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) { switchdev_priv->lower_dev = lower_dev; return 1; } return 0; } static struct net_device * switchdev_lower_dev_find_rcu(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev)) { struct switchdev_nested_priv switchdev_priv = { .check_cb = check_cb, .foreign_dev_check_cb = foreign_dev_check_cb, .dev = dev, .lower_dev = NULL, }; struct netdev_nested_priv priv = { .data = &switchdev_priv, }; netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv); return switchdev_priv.lower_dev; } static struct net_device * switchdev_lower_dev_find(struct net_device *dev, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev)) { struct switchdev_nested_priv switchdev_priv = { .check_cb = check_cb, .foreign_dev_check_cb = foreign_dev_check_cb, .dev = dev, .lower_dev = NULL, }; struct netdev_nested_priv priv = { .data = &switchdev_priv, }; netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv); return switchdev_priv.lower_dev; } static int __switchdev_handle_fdb_event_to_device(struct net_device *dev, struct net_device *orig_dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)) { const struct 
switchdev_notifier_info *info = &fdb_info->info; struct net_device *br, *lower_dev, *switchdev; struct list_head *iter; int err = -EOPNOTSUPP; if (check_cb(dev)) return mod_cb(dev, orig_dev, event, info->ctx, fdb_info); /* Recurse through lower interfaces in case the FDB entry is pointing * towards a bridge or a LAG device. */ netdev_for_each_lower_dev(dev, lower_dev, iter) { /* Do not propagate FDB entries across bridges */ if (netif_is_bridge_master(lower_dev)) continue; /* Bridge ports might be either us, or LAG interfaces * that we offload. */ if (!check_cb(lower_dev) && !switchdev_lower_dev_find_rcu(lower_dev, check_cb, foreign_dev_check_cb)) continue; err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev, event, fdb_info, check_cb, foreign_dev_check_cb, mod_cb); if (err && err != -EOPNOTSUPP) return err; } /* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us. */ br = netdev_master_upper_dev_get_rcu(dev); if (!br || !netif_is_bridge_master(br)) return 0; switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb); if (!switchdev) return 0; if (!foreign_dev_check_cb(switchdev, dev)) return err; return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info, check_cb, foreign_dev_check_cb, mod_cb); } int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event, const struct switchdev_notifier_fdb_info *fdb_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, unsigned long event, const void *ctx, const struct switchdev_notifier_fdb_info *fdb_info)) { int err; err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info, check_cb, foreign_dev_check_cb, mod_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device); static int __switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)) { struct switchdev_notifier_info *info = &port_obj_info->info; struct net_device *br, *lower_dev, *switchdev; struct netlink_ext_ack *extack; struct list_head *iter; int err = -EOPNOTSUPP; extack = switchdev_notifier_info_to_extack(info); if (check_cb(dev)) { err = add_cb(dev, info->ctx, port_obj_info->obj, extack); if (err != -EOPNOTSUPP) port_obj_info->handled = true; return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper. */ netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue; /* When searching for switchdev interfaces that are neighbors * of foreign ones, and @dev is a bridge, do not recurse on the * foreign interface again, it was already visited. 
*/ if (foreign_dev_check_cb && !check_cb(lower_dev) && !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb)) continue; err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info, check_cb, foreign_dev_check_cb, add_cb); if (err && err != -EOPNOTSUPP) return err; } /* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us. */ if (!foreign_dev_check_cb) return err; br = netdev_master_upper_dev_get(dev); if (!br || !netif_is_bridge_master(br)) return err; switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb); if (!switchdev) return err; if (!foreign_dev_check_cb(switchdev, dev)) return err; return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb, foreign_dev_check_cb, add_cb); } /* Pass through a port object addition, if @dev passes @check_cb, or replicate * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a * bridge or a LAG. */ int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)) { int err; err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb, NULL, add_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add); /* Same as switchdev_handle_port_obj_add(), except if object is notified on a * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices * that pass @check_cb and are in the same bridge as @dev. */ int switchdev_handle_port_obj_add_foreign(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)) { int err; err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb, foreign_dev_check_cb, add_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign); static int __switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)) { struct switchdev_notifier_info *info = &port_obj_info->info; struct net_device *br, *lower_dev, *switchdev; struct list_head *iter; int err = -EOPNOTSUPP; if (check_cb(dev)) { err = del_cb(dev, info->ctx, port_obj_info->obj); if (err != -EOPNOTSUPP) port_obj_info->handled = true; return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper. */ netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue; /* When searching for switchdev interfaces that are neighbors * of foreign ones, and @dev is a bridge, do not recurse on the * foreign interface again, it was already visited. 
*/ if (foreign_dev_check_cb && !check_cb(lower_dev) && !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb)) continue; err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info, check_cb, foreign_dev_check_cb, del_cb); if (err && err != -EOPNOTSUPP) return err; } /* Event is neither on a bridge nor a LAG. Check whether it is on an * interface that is in a bridge with us. */ if (!foreign_dev_check_cb) return err; br = netdev_master_upper_dev_get(dev); if (!br || !netif_is_bridge_master(br)) return err; switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb); if (!switchdev) return err; if (!foreign_dev_check_cb(switchdev, dev)) return err; return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb, foreign_dev_check_cb, del_cb); } /* Pass through a port object deletion, if @dev passes @check_cb, or replicate * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a * bridge or a LAG. */ int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)) { int err; err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, NULL, del_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del); /* Same as switchdev_handle_port_obj_del(), except if object is notified on a * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices * that pass @check_cb and are in the same bridge as @dev. */ int switchdev_handle_port_obj_del_foreign(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), bool (*foreign_dev_check_cb)(const struct net_device *dev, const struct net_device *foreign_dev), int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)) { int err; err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb, foreign_dev_check_cb, del_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign); static int __switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)) { struct switchdev_notifier_info *info = &port_attr_info->info; struct netlink_ext_ack *extack; struct net_device *lower_dev; struct list_head *iter; int err = -EOPNOTSUPP; extack = switchdev_notifier_info_to_extack(info); if (check_cb(dev)) { err = set_cb(dev, info->ctx, port_attr_info->attr, extack); if (err != -EOPNOTSUPP) port_attr_info->handled = true; return err; } /* Switch ports might be stacked under e.g. a LAG. Ignore the * unsupported devices, another driver might be able to handle them. But * propagate to the callers any hard errors. * * If the driver does its own bookkeeping of stacked ports, it's not * necessary to go through this helper. 
*/ netdev_for_each_lower_dev(dev, lower_dev, iter) { if (netif_is_bridge_master(lower_dev)) continue; err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info, check_cb, set_cb); if (err && err != -EOPNOTSUPP) return err; } return err; } int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), int (*set_cb)(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, struct netlink_ext_ack *extack)) { int err; err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb, set_cb); if (err == -EOPNOTSUPP) err = 0; return err; } EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set); int switchdev_bridge_port_offload(struct net_device *brport_dev, struct net_device *dev, const void *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb, bool tx_fwd_offload, struct netlink_ext_ack *extack) { struct switchdev_notifier_brport_info brport_info = { .brport = { .dev = dev, .ctx = ctx, .atomic_nb = atomic_nb, .blocking_nb = blocking_nb, .tx_fwd_offload = tx_fwd_offload, }, }; int err; ASSERT_RTNL(); err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED, brport_dev, &brport_info.info, extack); return notifier_to_errno(err); } EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload); void switchdev_bridge_port_unoffload(struct net_device *brport_dev, const void *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb) { struct switchdev_notifier_brport_info brport_info = { .brport = { .ctx = ctx, .atomic_nb = atomic_nb, .blocking_nb = blocking_nb, }, }; ASSERT_RTNL(); call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED, brport_dev, &brport_info.info, NULL); } EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload); int switchdev_bridge_port_replay(struct net_device *brport_dev, struct net_device *dev, const void *ctx, struct notifier_block *atomic_nb, struct notifier_block *blocking_nb, struct netlink_ext_ack *extack) { struct switchdev_notifier_brport_info brport_info = { .brport = { .dev = dev, .ctx = ctx, .atomic_nb = atomic_nb, .blocking_nb = blocking_nb, }, }; int err; ASSERT_RTNL(); err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY, brport_dev, &brport_info.info, extack); return notifier_to_errno(err); } EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);
linux-master
net/switchdev/switchdev.c
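A brief illustrative sketch, not part of the file above: how a switch driver typically consumes the blocking notifier chain and lets switchdev_handle_port_attr_set() walk stacked devices (bridge/LAG) down to its own ports. All example_* names are hypothetical placeholders.

/*
 * Hypothetical driver-side sketch: hook the switchdev blocking notifier and
 * dispatch SWITCHDEV_PORT_ATTR_SET through the helper defined above. The
 * example_* identifiers are placeholders, not an existing driver.
 */
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

/* a real driver would populate this with its ndo callbacks */
static const struct net_device_ops example_netdev_ops;

static bool example_port_dev_check(const struct net_device *dev)
{
	/* true only for netdevs owned by this driver */
	return dev->netdev_ops == &example_netdev_ops;
}

static int example_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		/* program the hardware STP state for this port here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int example_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		/* helper replicates the attribute towards our lower ports */
		err = switchdev_handle_port_attr_set(dev, ptr,
						     example_port_dev_check,
						     example_port_attr_set);
		return notifier_from_errno(err);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_switchdev_blocking_nb = {
	.notifier_call = example_switchdev_blocking_event,
};

/* registered once at module init:
 *	register_switchdev_blocking_notifier(&example_switchdev_blocking_nb);
 */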
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * smc_sysctl.c: sysctl interface to SMC subsystem. * * Copyright (c) 2022, Alibaba Inc. * * Author: Tony Lu <[email protected]> * */ #include <linux/init.h> #include <linux/sysctl.h> #include <net/net_namespace.h> #include "smc.h" #include "smc_core.h" #include "smc_llc.h" #include "smc_sysctl.h" static int min_sndbuf = SMC_BUF_MIN_SIZE; static int min_rcvbuf = SMC_BUF_MIN_SIZE; static int max_sndbuf = INT_MAX / 2; static int max_rcvbuf = INT_MAX / 2; static const int net_smc_wmem_init = (64 * 1024); static const int net_smc_rmem_init = (64 * 1024); static struct ctl_table smc_table[] = { { .procname = "autocorking_size", .data = &init_net.smc.sysctl_autocorking_size, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec, }, { .procname = "smcr_buf_type", .data = &init_net.smc.sysctl_smcr_buf_type, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, { .procname = "smcr_testlink_time", .data = &init_net.smc.sysctl_smcr_testlink_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "wmem", .data = &init_net.smc.sysctl_wmem, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_sndbuf, .extra2 = &max_sndbuf, }, { .procname = "rmem", .data = &init_net.smc.sysctl_rmem, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &min_rcvbuf, .extra2 = &max_rcvbuf, }, { } }; int __net_init smc_sysctl_net_init(struct net *net) { struct ctl_table *table; table = smc_table; if (!net_eq(net, &init_net)) { int i; table = kmemdup(table, sizeof(smc_table), GFP_KERNEL); if (!table) goto err_alloc; for (i = 0; i < ARRAY_SIZE(smc_table) - 1; i++) table[i].data += (void *)net - (void *)&init_net; } net->smc.smc_hdr = register_net_sysctl_sz(net, "net/smc", table, ARRAY_SIZE(smc_table)); if (!net->smc.smc_hdr) goto err_reg; net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE; net->smc.sysctl_smcr_buf_type = SMCR_PHYS_CONT_BUFS; net->smc.sysctl_smcr_testlink_time = SMC_LLC_TESTLINK_DEFAULT_TIME; WRITE_ONCE(net->smc.sysctl_wmem, net_smc_wmem_init); WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init); return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } void __net_exit smc_sysctl_net_exit(struct net *net) { struct ctl_table *table; table = net->smc.smc_hdr->ctl_table_arg; unregister_net_sysctl_table(net->smc.smc_hdr); if (!net_eq(net, &init_net)) kfree(table); }
linux-master
net/smc/smc_sysctl.c
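A brief illustrative sketch, not part of the file above: the ctl_table registered through register_net_sysctl_sz(net, "net/smc", ...) shows up under /proc/sys/net/smc/ in each network namespace; a minimal user-space reader for one of the tunables follows, with error handling trimmed.

/*
 * Hypothetical user-space sketch: read the SMC autocorking threshold exposed
 * by the sysctl table above. Path assumed from the "net/smc" registration.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/smc/autocorking_size", "r");
	unsigned int size;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &size) == 1)
		printf("SMC autocorking threshold: %u bytes\n", size);
	fclose(f);
	return 0;
}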
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Manage send buffer. * Producer: * Copy user space data into send buffer, if send buffer space available. * Consumer: * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available. * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/net.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <linux/sched/signal.h> #include <net/sock.h> #include <net/tcp.h> #include "smc.h" #include "smc_wr.h" #include "smc_cdc.h" #include "smc_close.h" #include "smc_ism.h" #include "smc_tx.h" #include "smc_stats.h" #include "smc_tracepoint.h" #define SMC_TX_WORK_DELAY 0 /***************************** sndbuf producer *******************************/ /* callback implementation for sk.sk_write_space() * to wakeup sndbuf producers that blocked with smc_tx_wait(). * called under sk_socket lock. */ static void smc_tx_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; struct smc_sock *smc = smc_sk(sk); struct socket_wq *wq; /* similar to sk_stream_write_space */ if (atomic_read(&smc->conn.sndbuf_space) && sock) { if (test_bit(SOCK_NOSPACE, &sock->flags)) SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk); clear_bit(SOCK_NOSPACE, &sock->flags); rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } } /* Wakeup sndbuf producers that blocked with smc_tx_wait(). * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space(). */ void smc_tx_sndbuf_nonfull(struct smc_sock *smc) { if (smc->sk.sk_socket && test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags)) smc->sk.sk_write_space(&smc->sk); } /* blocks sndbuf producer until at least one byte of free space available * or urgent Byte was consumed */ static int smc_tx_wait(struct smc_sock *smc, int flags) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct smc_connection *conn = &smc->conn; struct sock *sk = &smc->sk; long timeo; int rc = 0; /* similar to sk_stream_wait_memory */ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); add_wait_queue(sk_sleep(sk), &wait); while (1) { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN) || conn->killed || conn->local_tx_ctrl.conn_state_flags.peer_done_writing) { rc = -EPIPE; break; } if (smc_cdc_rxed_any_close(conn)) { rc = -ECONNRESET; break; } if (!timeo) { /* ensure EPOLLOUT is subsequently generated */ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); rc = -EAGAIN; break; } if (signal_pending(current)) { rc = sock_intr_errno(timeo); break; } sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend) break; /* at least 1 byte of free & no urgent data */ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk_wait_event(sk, &timeo, READ_ONCE(sk->sk_err) || (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) || smc_cdc_rxed_any_close(conn) || (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend), &wait); } remove_wait_queue(sk_sleep(sk), &wait); return rc; } static bool smc_tx_is_corked(struct smc_sock *smc) { struct tcp_sock *tp = tcp_sk(smc->clcsock->sk); return (tp->nonagle & TCP_NAGLE_CORK) ? 
true : false; } /* If we have pending CDC messages, do not send: * Because CQE of this CDC message will happen shortly, it gives * a chance to coalesce future sendmsg() payload in to one RDMA Write, * without need for a timer, and with no latency trade off. * Algorithm here: * 1. First message should never cork * 2. If we have pending Tx CDC messages, wait for the first CDC * message's completion * 3. Don't cork to much data in a single RDMA Write to prevent burst * traffic, total corked message should not exceed sendbuf/2 */ static bool smc_should_autocork(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; int corking_size; corking_size = min_t(unsigned int, conn->sndbuf_desc->len >> 1, sock_net(&smc->sk)->smc.sysctl_autocorking_size); if (atomic_read(&conn->cdc_pend_tx_wr) == 0 || smc_tx_prepared_sends(conn) > corking_size) return false; return true; } static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg) { struct smc_connection *conn = &smc->conn; if (smc_should_autocork(smc)) return true; /* for a corked socket defer the RDMA writes if * sndbuf_space is still available. The applications * should known how/when to uncork it. */ if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) && atomic_read(&conn->sndbuf_space)) return true; return false; } /* sndbuf producer: main API called by socket layer. * called under sock lock. */ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) { size_t copylen, send_done = 0, send_remaining = len; size_t chunk_len, chunk_off, chunk_len_sum; struct smc_connection *conn = &smc->conn; union smc_host_cursor prep; struct sock *sk = &smc->sk; char *sndbuf_base; int tx_cnt_prep; int writespace; int rc, chunk; /* This should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { rc = -EPIPE; goto out_err; } if (sk->sk_state == SMC_INIT) return -ENOTCONN; if (len > conn->sndbuf_desc->len) SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk); if (len > conn->peer_rmbe_size) SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk); if (msg->msg_flags & MSG_OOB) SMC_STAT_INC(smc, urg_data_cnt); while (msg_data_left(msg)) { if (smc->sk.sk_shutdown & SEND_SHUTDOWN || (smc->sk.sk_err == ECONNABORTED) || conn->killed) return -EPIPE; if (smc_cdc_rxed_any_close(conn)) return send_done ?: -ECONNRESET; if (msg->msg_flags & MSG_OOB) conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { if (send_done) return send_done; rc = smc_tx_wait(smc, msg->msg_flags); if (rc) goto out_err; continue; } /* initialize variables for 1st iteration of subsequent loop */ /* could be just 1 byte, even after smc_tx_wait above */ writespace = atomic_read(&conn->sndbuf_space); /* not more than what user space asked for */ copylen = min_t(size_t, send_remaining, writespace); /* determine start of sndbuf */ sndbuf_base = conn->sndbuf_desc->cpu_addr; smc_curs_copy(&prep, &conn->tx_curs_prep, conn); tx_cnt_prep = prep.count; /* determine chunks where to write into sndbuf */ /* either unwrapped case, or 1st chunk of wrapped case */ chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len - tx_cnt_prep); chunk_len_sum = chunk_len; chunk_off = tx_cnt_prep; for (chunk = 0; chunk < 2; chunk++) { rc = memcpy_from_msg(sndbuf_base + chunk_off, msg, chunk_len); if (rc) { smc_sndbuf_sync_sg_for_device(conn); if (send_done) return send_done; goto out_err; } send_done += chunk_len; send_remaining -= chunk_len; if (chunk_len_sum == copylen) break; /* either 
on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ chunk_len = copylen - chunk_len; /* remainder */ chunk_len_sum += chunk_len; chunk_off = 0; /* modulo offset in send ring buffer */ } smc_sndbuf_sync_sg_for_device(conn); /* update cursors */ smc_curs_add(conn->sndbuf_desc->len, &prep, copylen); smc_curs_copy(&conn->tx_curs_prep, &prep, conn); /* increased in send tasklet smc_cdc_tx_handler() */ smp_mb__before_atomic(); atomic_sub(copylen, &conn->sndbuf_space); /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ smp_mb__after_atomic(); /* since we just produced more new data into sndbuf, * trigger sndbuf consumer: RDMA write into peer RMBE and CDC */ if ((msg->msg_flags & MSG_OOB) && !send_remaining) conn->urg_tx_pend = true; /* If we need to cork, do nothing and wait for the next * sendmsg() call or push on tx completion */ if (!smc_tx_should_cork(smc, msg)) smc_tx_sndbuf_nonempty(conn); trace_smc_tx_sendmsg(smc, copylen); } /* while (msg_data_left(msg)) */ return send_done; out_err: rc = sk_stream_error(sk, msg->msg_flags, rc); /* make sure we wake any epoll edge trigger waiter */ if (unlikely(rc == -EAGAIN)) sk->sk_write_space(sk); return rc; } /***************************** sndbuf consumer *******************************/ /* sndbuf consumer: actual data transfer of one target chunk with ISM write */ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, u32 offset, int signal) { int rc; rc = smc_ism_write(conn->lgr->smcd, conn->peer_token, conn->peer_rmbe_idx, signal, conn->tx_off + offset, data, len); if (rc) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; return rc; } /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, int num_sges, struct ib_rdma_wr *rdma_wr) { struct smc_link_group *lgr = conn->lgr; struct smc_link *link = conn->lnk; int rc; rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); rdma_wr->wr.num_sge = num_sges; rdma_wr->remote_addr = lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr + /* RMBE within RMB */ conn->tx_off + /* offset within RMBE */ peer_rmbe_offset; rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey; rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); if (rc) smcr_link_down_cond_sched(link); return rc; } /* sndbuf consumer */ static inline void smc_tx_advance_cursors(struct smc_connection *conn, union smc_host_cursor *prod, union smc_host_cursor *sent, size_t len) { smc_curs_add(conn->peer_rmbe_size, prod, len); /* increased in recv tasklet smc_cdc_msg_rcv() */ smp_mb__before_atomic(); /* data in flight reduces usable snd_wnd */ atomic_sub(len, &conn->peer_rmbe_space); /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */ smp_mb__after_atomic(); smc_curs_add(conn->sndbuf_desc->len, sent, len); } /* SMC-R helper for smc_tx_rdma_writes() */ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, size_t src_off, size_t src_len, size_t dst_off, size_t dst_len, struct smc_rdma_wr *wr_rdma_buf) { struct smc_link *link = conn->lnk; dma_addr_t dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl); u64 virt_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr; int src_len_sum = src_len, dst_len_sum = dst_len; int sent_count = src_off; int srcchunk, dstchunk; int num_sges; int rc; for (dstchunk = 0; dstchunk < 2; dstchunk++) { struct ib_rdma_wr *wr = &wr_rdma_buf->wr_tx_rdma[dstchunk]; struct ib_sge *sge = wr->wr.sg_list; u64 base_addr = dma_addr; if (dst_len < 
link->qp_attr.cap.max_inline_data) { base_addr = virt_addr; wr->wr.send_flags |= IB_SEND_INLINE; } else { wr->wr.send_flags &= ~IB_SEND_INLINE; } num_sges = 0; for (srcchunk = 0; srcchunk < 2; srcchunk++) { sge[srcchunk].addr = conn->sndbuf_desc->is_vm ? (virt_addr + src_off) : (base_addr + src_off); sge[srcchunk].length = src_len; if (conn->sndbuf_desc->is_vm) sge[srcchunk].lkey = conn->sndbuf_desc->mr[link->link_idx]->lkey; num_sges++; src_off += src_len; if (src_off >= conn->sndbuf_desc->len) src_off -= conn->sndbuf_desc->len; /* modulo in send ring */ if (src_len_sum == dst_len) break; /* either on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ src_len = dst_len - src_len; /* remainder */ src_len_sum += src_len; } rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr); if (rc) return rc; if (dst_len_sum == len) break; /* either on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ dst_off = 0; /* modulo offset in RMBE ring buffer */ dst_len = len - dst_len; /* remainder */ dst_len_sum += dst_len; src_len = min_t(int, dst_len, conn->sndbuf_desc->len - sent_count); src_len_sum = src_len; } return 0; } /* SMC-D helper for smc_tx_rdma_writes() */ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, size_t src_off, size_t src_len, size_t dst_off, size_t dst_len) { int src_len_sum = src_len, dst_len_sum = dst_len; int srcchunk, dstchunk; int rc; for (dstchunk = 0; dstchunk < 2; dstchunk++) { for (srcchunk = 0; srcchunk < 2; srcchunk++) { void *data = conn->sndbuf_desc->cpu_addr + src_off; rc = smcd_tx_ism_write(conn, data, src_len, dst_off + sizeof(struct smcd_cdc_msg), 0); if (rc) return rc; dst_off += src_len; src_off += src_len; if (src_off >= conn->sndbuf_desc->len) src_off -= conn->sndbuf_desc->len; /* modulo in send ring */ if (src_len_sum == dst_len) break; /* either on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ src_len = dst_len - src_len; /* remainder */ src_len_sum += src_len; } if (dst_len_sum == len) break; /* either on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ dst_off = 0; /* modulo offset in RMBE ring buffer */ dst_len = len - dst_len; /* remainder */ dst_len_sum += dst_len; src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off); src_len_sum = src_len; } return 0; } /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; * usable snd_wnd as max transmit */ static int smc_tx_rdma_writes(struct smc_connection *conn, struct smc_rdma_wr *wr_rdma_buf) { size_t len, src_len, dst_off, dst_len; /* current chunk values */ union smc_host_cursor sent, prep, prod, cons; struct smc_cdc_producer_flags *pflags; int to_send, rmbespace; int rc; /* source: sndbuf */ smc_curs_copy(&sent, &conn->tx_curs_sent, conn); smc_curs_copy(&prep, &conn->tx_curs_prep, conn); /* cf. wmem_alloc - (snd_max - snd_una) */ to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep); if (to_send <= 0) return 0; /* destination: RMBE */ /* cf. snd_wnd */ rmbespace = atomic_read(&conn->peer_rmbe_space); if (rmbespace <= 0) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk); return 0; } smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn); smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); /* if usable snd_wnd closes ask peer to advertise once it opens again */ pflags = &conn->local_tx_ctrl.prod_flags; pflags->write_blocked = (to_send >= rmbespace); /* cf. 
usable snd_wnd */ len = min(to_send, rmbespace); /* initialize variables for first iteration of subsequent nested loop */ dst_off = prod.count; if (prod.wrap == cons.wrap) { /* the filled destination area is unwrapped, * hence the available free destination space is wrapped * and we need 2 destination chunks of sum len; start with 1st * which is limited by what's available in sndbuf */ dst_len = min_t(size_t, conn->peer_rmbe_size - prod.count, len); } else { /* the filled destination area is wrapped, * hence the available free destination space is unwrapped * and we need a single destination chunk of entire len */ dst_len = len; } /* dst_len determines the maximum src_len */ if (sent.count + dst_len <= conn->sndbuf_desc->len) { /* unwrapped src case: single chunk of entire dst_len */ src_len = dst_len; } else { /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */ src_len = conn->sndbuf_desc->len - sent.count; } if (conn->lgr->is_smcd) rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len, dst_off, dst_len); else rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, dst_off, dst_len, wr_rdma_buf); if (rc) return rc; if (conn->urg_tx_pend && len == to_send) pflags->urg_data_present = 1; smc_tx_advance_cursors(conn, &prod, &sent, len); /* update connection's cursors with advanced local cursors */ smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn); /* dst: peer RMBE */ smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */ return 0; } /* Wakeup sndbuf consumers from any context (IRQ or process) * since there is more data to transmit; usable snd_wnd as max transmit */ static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; struct smc_link *link = conn->lnk; struct smc_rdma_wr *wr_rdma_buf; struct smc_cdc_tx_pend *pend; struct smc_wr_buf *wr_buf; int rc; if (!link || !smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend); if (rc < 0) { smc_wr_tx_link_put(link); if (rc == -EBUSY) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); if (smc->sk.sk_err == ECONNABORTED) return sock_error(&smc->sk); if (conn->killed) return -EPIPE; rc = 0; mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work, SMC_TX_WORK_DELAY); } return rc; } spin_lock_bh(&conn->send_lock); if (link != conn->lnk) { /* link of connection changed, tx_work will restart */ smc_wr_tx_put_slot(link, (struct smc_wr_tx_pend_priv *)pend); rc = -ENOLINK; goto out_unlock; } if (!pflags->urg_data_present) { rc = smc_tx_rdma_writes(conn, wr_rdma_buf); if (rc) { smc_wr_tx_put_slot(link, (struct smc_wr_tx_pend_priv *)pend); goto out_unlock; } } rc = smc_cdc_msg_send(conn, wr_buf, pend); if (!rc && pflags->urg_data_present) { pflags->urg_data_pending = 0; pflags->urg_data_present = 0; } out_unlock: spin_unlock_bh(&conn->send_lock); smc_wr_tx_link_put(link); return rc; } static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; int rc = 0; spin_lock_bh(&conn->send_lock); if (!pflags->urg_data_present) rc = smc_tx_rdma_writes(conn, NULL); if (!rc) rc = smcd_cdc_msg_send(conn); if (!rc && pflags->urg_data_present) { pflags->urg_data_pending = 0; pflags->urg_data_present = 0; } spin_unlock_bh(&conn->send_lock); return rc; } static int __smc_tx_sndbuf_nonempty(struct smc_connection *conn) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); int rc = 0; /* No data in the 
send queue */ if (unlikely(smc_tx_prepared_sends(conn) <= 0)) goto out; /* Peer don't have RMBE space */ if (unlikely(atomic_read(&conn->peer_rmbe_space) <= 0)) { SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk); goto out; } if (conn->killed || conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) { rc = -EPIPE; /* connection being aborted */ goto out; } if (conn->lgr->is_smcd) rc = smcd_tx_sndbuf_nonempty(conn); else rc = smcr_tx_sndbuf_nonempty(conn); if (!rc) { /* trigger socket release if connection is closing */ smc_close_wake_tx_prepared(smc); } out: return rc; } int smc_tx_sndbuf_nonempty(struct smc_connection *conn) { int rc; /* This make sure only one can send simultaneously to prevent wasting * of CPU and CDC slot. * Record whether someone has tried to push while we are pushing. */ if (atomic_inc_return(&conn->tx_pushing) > 1) return 0; again: atomic_set(&conn->tx_pushing, 1); smp_wmb(); /* Make sure tx_pushing is 1 before real send */ rc = __smc_tx_sndbuf_nonempty(conn); /* We need to check whether someone else have added some data into * the send queue and tried to push but failed after the atomic_set() * when we are pushing. * If so, we need to push again to prevent those data hang in the send * queue. */ if (unlikely(!atomic_dec_and_test(&conn->tx_pushing))) goto again; return rc; } /* Wakeup sndbuf consumers from process context * since there is more data to transmit. The caller * must hold sock lock. */ void smc_tx_pending(struct smc_connection *conn) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); int rc; if (smc->sk.sk_err) return; rc = smc_tx_sndbuf_nonempty(conn); if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked && !atomic_read(&conn->bytes_to_rcv)) conn->local_rx_ctrl.prod_flags.write_blocked = 0; } /* Wakeup sndbuf consumers from process context * since there is more data to transmit in locked * sock. */ void smc_tx_work(struct work_struct *work) { struct smc_connection *conn = container_of(to_delayed_work(work), struct smc_connection, tx_work); struct smc_sock *smc = container_of(conn, struct smc_sock, conn); lock_sock(&smc->sk); smc_tx_pending(conn); release_sock(&smc->sk); } void smc_tx_consumer_update(struct smc_connection *conn, bool force) { union smc_host_cursor cfed, cons, prod; int sender_free = conn->rmb_desc->len; int to_confirm; smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn); to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons); if (to_confirm > conn->rmbe_update_limit) { smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); sender_free = conn->rmb_desc->len - smc_curs_diff_large(conn->rmb_desc->len, &cfed, &prod); } if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || force || ((to_confirm > conn->rmbe_update_limit) && ((sender_free <= (conn->rmb_desc->len / 2)) || conn->local_rx_ctrl.prod_flags.write_blocked))) { if (conn->killed || conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) return; if ((smc_cdc_get_slot_and_msg_send(conn) < 0) && !conn->killed) { queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, SMC_TX_WORK_DELAY); return; } } if (conn->local_rx_ctrl.prod_flags.write_blocked && !atomic_read(&conn->bytes_to_rcv)) conn->local_rx_ctrl.prod_flags.write_blocked = 0; } /***************************** send initialize *******************************/ /* Initialize send properties on connection establishment. NB: not __init! */ void smc_tx_init(struct smc_sock *smc) { smc->sk.sk_write_space = smc_tx_write_space; }
linux-master
net/smc/smc_tx.c
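A brief illustrative sketch, not part of the file above: how the corking behaviour in smc_tx_sendmsg()/smc_tx_should_cork() looks from an application using an AF_SMC socket. The fallback AF_SMC/SMCPROTO_SMC definitions are assumptions for systems whose headers do not expose them.

/*
 * Hypothetical user-space sketch: with MSG_MORE set and sndbuf space still
 * available, smc_tx_sendmsg() defers the RDMA write; the final send without
 * MSG_MORE typically triggers smc_tx_sndbuf_nonempty(). The constants below
 * are assumptions taken from include/linux/socket.h and net/smc/smc.h.
 */
#include <string.h>
#include <sys/socket.h>

#ifndef AF_SMC
#define AF_SMC		43	/* assumed, from include/linux/socket.h */
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC	0	/* assumed, SMC over IPv4, from net/smc/smc.h */
#endif

static int send_two_chunks(int fd, const char *hdr, const char *body)
{
	/* first chunk: keep corked so the kernel can coalesce it */
	if (send(fd, hdr, strlen(hdr), MSG_MORE) < 0)
		return -1;
	/* last chunk: no MSG_MORE, so the pending data is normally pushed */
	return send(fd, body, strlen(body), 0) < 0 ? -1 : 0;
}

/* usage (error handling omitted):
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	connect(fd, ...);	// same sockaddr_in as a TCP socket
 *	send_two_chunks(fd, "HDR", "payload");
 */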
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Generic netlink support functions to configure an SMC-R PNET table * * Copyright IBM Corp. 2016 * * Author(s): Thomas Richter <[email protected]> */ #include <linux/module.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <net/netlink.h> #include <net/genetlink.h> #include <uapi/linux/if.h> #include <uapi/linux/smc.h> #include <rdma/ib_verbs.h> #include <net/netns/generic.h> #include "smc_netns.h" #include "smc_pnet.h" #include "smc_ib.h" #include "smc_ism.h" #include "smc_core.h" static struct net_device *__pnet_find_base_ndev(struct net_device *ndev); static struct net_device *pnet_find_base_ndev(struct net_device *ndev); static const struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, .len = SMC_MAX_PNETID_LEN }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [SMC_PNETID_IBNAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX - 1 }, [SMC_PNETID_IBPORT] = { .type = NLA_U8 } }; static struct genl_family smc_pnet_nl_family; enum smc_pnet_nametype { SMC_PNET_ETH = 1, SMC_PNET_IB = 2, }; /* pnet entry stored in pnet table */ struct smc_pnetentry { struct list_head list; char pnet_name[SMC_MAX_PNETID_LEN + 1]; enum smc_pnet_nametype type; union { struct { char eth_name[IFNAMSIZ + 1]; struct net_device *ndev; netdevice_tracker dev_tracker; }; struct { char ib_name[IB_DEVICE_NAME_MAX + 1]; u8 ib_port; }; }; }; /* Check if the pnetid is set */ bool smc_pnet_is_pnetid_set(u8 *pnetid) { if (pnetid[0] == 0 || pnetid[0] == _S) return false; return true; } /* Check if two given pnetids match */ static bool smc_pnet_match(u8 *pnetid1, u8 *pnetid2) { int i; for (i = 0; i < SMC_MAX_PNETID_LEN; i++) { if ((pnetid1[i] == 0 || pnetid1[i] == _S) && (pnetid2[i] == 0 || pnetid2[i] == _S)) break; if (pnetid1[i] != pnetid2[i]) return false; } return true; } /* Remove a pnetid from the pnet table. 
*/ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct smc_ib_device *ibdev; struct smcd_dev *smcd; struct smc_net *sn; int rc = -ENOENT; int ibport; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* remove table entry */ mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || smc_pnet_match(pnetelem->pnet_name, pnet_name)) { list_del(&pnetelem->list); if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pr_warn_ratelimited("smc: net device %s " "erased user defined " "pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); } kfree(pnetelem); rc = 0; } } mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) return rc; /* remove ib devices */ mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) { if (ibdev->pnetid_by_user[ibport] && (!pnet_name || smc_pnet_match(pnet_name, ibdev->pnetid[ibport]))) { pr_warn_ratelimited("smc: ib device %s ibport " "%d erased user defined " "pnetid %.16s\n", ibdev->ibdev->name, ibport + 1, ibdev->pnetid[ibport]); memset(ibdev->pnetid[ibport], 0, SMC_MAX_PNETID_LEN); ibdev->pnetid_by_user[ibport] = false; rc = 0; } } } mutex_unlock(&smc_ib_devices.mutex); /* remove smcd devices */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { if (smcd->pnetid_by_user && (!pnet_name || smc_pnet_match(pnet_name, smcd->pnetid))) { pr_warn_ratelimited("smc: smcd device %s " "erased user defined pnetid " "%.16s\n", dev_name(smcd->ops->get_dev(smcd)), smcd->pnetid); memset(smcd->pnetid, 0, SMC_MAX_PNETID_LEN); smcd->pnetid_by_user = false; rc = 0; } } mutex_unlock(&smcd_dev_list.mutex); return rc; } /* Add the reference to a given network device to the pnet table. */ static int smc_pnet_add_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { netdev_hold(ndev, &pnetelem->dev_tracker, GFP_ATOMIC); pnetelem->ndev = ndev; rc = 0; pr_warn_ratelimited("smc: adding net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Remove the reference to a given network device from the pnet table. 
*/ static int smc_pnet_remove_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pnetelem->ndev = NULL; rc = 0; pr_warn_ratelimited("smc: removing net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Apply pnetid to ib device when no pnetid is set. */ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port, char *pnet_name) { bool applied = false; mutex_lock(&smc_ib_devices.mutex); if (!smc_pnet_is_pnetid_set(ib_dev->pnetid[ib_port - 1])) { memcpy(ib_dev->pnetid[ib_port - 1], pnet_name, SMC_MAX_PNETID_LEN); ib_dev->pnetid_by_user[ib_port - 1] = true; applied = true; } mutex_unlock(&smc_ib_devices.mutex); return applied; } /* Apply pnetid to smcd device when no pnetid is set. */ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name) { bool applied = false; mutex_lock(&smcd_dev_list.mutex); if (!smc_pnet_is_pnetid_set(smcd_dev->pnetid)) { memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN); smcd_dev->pnetid_by_user = true; applied = true; } mutex_unlock(&smcd_dev_list.mutex); return applied; } /* The limit for pnetid is 16 characters. * Valid characters should be (single-byte character set) a-z, A-Z, 0-9. * Lower case letters are converted to upper case. * Interior blanks should not be used. */ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) { char *bf = skip_spaces(pnet_name); size_t len = strlen(bf); char *end = bf + len; if (!len) return false; while (--end >= bf && isspace(*end)) ; if (end - bf >= SMC_MAX_PNETID_LEN) return false; while (bf <= end) { if (!isalnum(*bf)) return false; *pnetid++ = islower(*bf) ? toupper(*bf) : *bf; bf++; } *pnetid = '\0'; return true; } /* Find an infiniband device by a given name. The device might not exist. */ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) { struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || (ibdev->ibdev->dev.parent && !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, IB_DEVICE_NAME_MAX - 1))) { goto out; } } ibdev = NULL; out: mutex_unlock(&smc_ib_devices.mutex); return ibdev; } /* Find an smcd device by a given name. The device might not exist. */ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name) { struct smcd_dev *smcd_dev; mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { if (!strncmp(dev_name(smcd_dev->ops->get_dev(smcd_dev)), smcd_name, IB_DEVICE_NAME_MAX - 1)) goto out; } smcd_dev = NULL; out: mutex_unlock(&smcd_dev_list.mutex); return smcd_dev; } static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, char *eth_name, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct net_device *ndev, *base_ndev; u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; bool new_netdev; int rc; /* check if (base) netdev already has a pnetid. 
If there is one, we do * not want to add a pnet table entry */ rc = -EEXIST; ndev = dev_get_by_name(net, eth_name); /* dev_hold() */ if (ndev) { base_ndev = pnet_find_base_ndev(ndev); if (!smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid)) goto out_put; } /* add a new netdev entry to the pnet table if there isn't one */ rc = -ENOMEM; new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) goto out_put; new_pe->type = SMC_PNET_ETH; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->eth_name, eth_name, IFNAMSIZ); rc = -EEXIST; new_netdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { new_netdev = false; break; } } if (new_netdev) { if (ndev) { new_pe->ndev = ndev; netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_ATOMIC); } list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } if (ndev) pr_warn_ratelimited("smc: net device %s " "applied user defined pnetid %.16s\n", new_pe->eth_name, new_pe->pnet_name); return 0; out_put: dev_put(ndev); return rc; } static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, u8 ib_port, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct smc_ib_device *ib_dev; bool smcddev_applied = true; bool ibdev_applied = true; struct smcd_dev *smcd; struct device *dev; bool new_ibdev; /* try to apply the pnetid to active devices */ ib_dev = smc_pnet_find_ib(ib_name); if (ib_dev) { ibdev_applied = smc_pnet_apply_ib(ib_dev, ib_port, pnet_name); if (ibdev_applied) pr_warn_ratelimited("smc: ib device %s ibport %d " "applied user defined pnetid " "%.16s\n", ib_dev->ibdev->name, ib_port, ib_dev->pnetid[ib_port - 1]); } smcd = smc_pnet_find_smcd(ib_name); if (smcd) { smcddev_applied = smc_pnet_apply_smcd(smcd, pnet_name); if (smcddev_applied) { dev = smcd->ops->get_dev(smcd); pr_warn_ratelimited("smc: smcd device %s " "applied user defined pnetid " "%.16s\n", dev_name(dev), smcd->pnetid); } } /* Apply fails when a device has a hardware-defined pnetid set, do not * add a pnet table entry in that case. */ if (!ibdev_applied || !smcddev_applied) return -EEXIST; /* add a new ib entry to the pnet table if there isn't one */ new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) return -ENOMEM; new_pe->type = SMC_PNET_IB; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX); new_pe->ib_port = ib_port; new_ibdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { new_ibdev = false; break; } } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 0 : -EEXIST; } /* Append a pnetid to the end of the pnet table if not already on this list. 
*/ static int smc_pnet_enter(struct net *net, struct nlattr *tb[]) { char pnet_name[SMC_MAX_PNETID_LEN + 1]; struct smc_pnettable *pnettable; bool new_netdev = false; bool new_ibdev = false; struct smc_net *sn; u8 ibport = 1; char *string; int rc; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; rc = -EINVAL; if (!tb[SMC_PNETID_NAME]) goto error; string = (char *)nla_data(tb[SMC_PNETID_NAME]); if (!smc_pnetid_valid(string, pnet_name)) goto error; if (tb[SMC_PNETID_ETHNAME]) { string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); rc = smc_pnet_add_eth(pnettable, net, string, pnet_name); if (!rc) new_netdev = true; else if (rc != -EEXIST) goto error; } /* if this is not the initial namespace, stop here */ if (net != &init_net) return new_netdev ? 0 : -EEXIST; rc = -EINVAL; if (tb[SMC_PNETID_IBNAME]) { string = (char *)nla_data(tb[SMC_PNETID_IBNAME]); string = strim(string); if (tb[SMC_PNETID_IBPORT]) { ibport = nla_get_u8(tb[SMC_PNETID_IBPORT]); if (ibport < 1 || ibport > SMC_MAX_PORTS) goto error; } rc = smc_pnet_add_ib(pnettable, string, ibport, pnet_name); if (!rc) new_ibdev = true; else if (rc != -EEXIST) goto error; } return (new_netdev || new_ibdev) ? 0 : -EEXIST; error: return rc; } /* Convert an smc_pnetentry to a netlink attribute sequence */ static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem) { if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name)) return -1; if (pnetelem->type == SMC_PNET_ETH) { if (nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->eth_name)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_ETHNAME, "n/a")) return -1; } if (pnetelem->type == SMC_PNET_IB) { if (nla_put_string(msg, SMC_PNETID_IBNAME, pnetelem->ib_name) || nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_IBNAME, "n/a") || nla_put_u8(msg, SMC_PNETID_IBPORT, 0xff)) return -1; } return 0; } static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); return smc_pnet_enter(net, info->attrs); } static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; return smc_pnet_remove_by_pnetid(net, (char *)nla_data(info->attrs[SMC_PNETID_NAME])); } static int smc_pnet_dump_start(struct netlink_callback *cb) { cb->args[0] = 0; return 0; } static int smc_pnet_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq, u32 flags, struct smc_pnetentry *pnetelem) { void *hdr; hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family, flags, SMC_PNETID_GET); if (!hdr) return -ENOMEM; if (smc_pnet_set_nla(skb, pnetelem) < 0) { genlmsg_cancel(skb, hdr); return -EMSGSIZE; } genlmsg_end(skb, hdr); return 0; } static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, u32 seq, u8 *pnetid, int start_idx) { struct smc_pnettable *pnettable; struct smc_pnetentry *pnetelem; struct smc_net *sn; int idx = 0; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* dump pnettable entries */ mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; if (idx++ < start_idx) continue; /* if this is not the initial namespace, dump only netdev */ if (net != &init_net && pnetelem->type != SMC_PNET_ETH) continue; if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI, pnetelem)) { --idx; break; } } 
mutex_unlock(&pnettable->lock); return idx; } static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx; idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NULL, cb->args[0]); cb->args[0] = idx; return skb->len; } /* Retrieve one PNETID entry */ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct sk_buff *msg; void *hdr; if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; _smc_pnet_dump(net, msg, info->snd_portid, info->snd_seq, nla_data(info->attrs[SMC_PNETID_NAME]), 0); /* finish multi part message and send it */ hdr = nlmsg_put(msg, info->snd_portid, info->snd_seq, NLMSG_DONE, 0, NLM_F_MULTI); if (!hdr) { nlmsg_free(msg); return -EMSGSIZE; } return genlmsg_reply(msg, info); } /* Remove and delete all pnetids from pnet table. */ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); smc_pnet_remove_by_pnetid(net, NULL); return 0; } /* SMC_PNETID generic netlink operation definition */ static const struct genl_ops smc_pnet_ops[] = { { .cmd = SMC_PNETID_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, /* can be retrieved by unprivileged users */ .doit = smc_pnet_get, .dumpit = smc_pnet_dump, .start = smc_pnet_dump_start }, { .cmd = SMC_PNETID_ADD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_add }, { .cmd = SMC_PNETID_DEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_del }, { .cmd = SMC_PNETID_FLUSH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_flush } }; /* SMC_PNETID family definition */ static struct genl_family smc_pnet_nl_family __ro_after_init = { .hdrsize = 0, .name = SMCR_GENL_FAMILY_NAME, .version = SMCR_GENL_FAMILY_VERSION, .maxattr = SMC_PNETID_MAX, .policy = smc_pnet_policy, .netnsok = true, .module = THIS_MODULE, .ops = smc_pnet_ops, .n_ops = ARRAY_SIZE(smc_pnet_ops), .resv_start_op = SMC_PNETID_FLUSH + 1, }; bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe; bool rc = false; read_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pe, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { rc = true; goto unlock; } } unlock: read_unlock(&sn->pnetids_ndev.lock); return rc; } static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pi; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -ENOMEM; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pi, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { refcount_inc(&pi->refcnt); kfree(pe); goto unlock; } } refcount_set(&pe->refcnt, 1); memcpy(pe->pnetid, pnetid, SMC_MAX_PNETID_LEN); list_add_tail(&pe->list, &sn->pnetids_ndev.list); unlock: write_unlock(&sn->pnetids_ndev.lock); return 0; } static void smc_pnet_remove_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pe2; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, pe2, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { if 
(refcount_dec_and_test(&pe->refcnt)) { list_del(&pe->list); kfree(pe); } break; } } write_unlock(&sn->pnetids_ndev.lock); } static void smc_pnet_add_base_pnetid(struct net *net, struct net_device *dev, u8 *ndev_pnetid) { struct net_device *base_dev; base_dev = __pnet_find_base_ndev(dev); if (base_dev->flags & IFF_UP && !smc_pnetid_by_dev_port(base_dev->dev.parent, base_dev->dev_port, ndev_pnetid)) { /* add to PNETIDs list */ smc_pnet_add_pnetid(net, ndev_pnetid); } } /* create initial list of netdevice pnetids */ static void smc_pnet_create_pnetids_list(struct net *net) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net_device *dev; rtnl_lock(); for_each_netdev(net, dev) smc_pnet_add_base_pnetid(net, dev, ndev_pnetid); rtnl_unlock(); } /* clean up list of netdevice pnetids */ static void smc_pnet_destroy_pnetids_list(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *temp_pe; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, temp_pe, &sn->pnetids_ndev.list, list) { list_del(&pe->list); kfree(pe); } write_unlock(&sn->pnetids_ndev.lock); } static int smc_pnet_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(event_dev); u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; switch (event) { case NETDEV_REBOOT: case NETDEV_UNREGISTER: smc_pnet_remove_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_REGISTER: smc_pnet_add_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_UP: smc_pnet_add_base_pnetid(net, event_dev, ndev_pnetid); return NOTIFY_OK; case NETDEV_DOWN: event_dev = __pnet_find_base_ndev(event_dev); if (!smc_pnetid_by_dev_port(event_dev->dev.parent, event_dev->dev_port, ndev_pnetid)) { /* remove from PNETIDs list */ smc_pnet_remove_pnetid(net, ndev_pnetid); } return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block smc_netdev_notifier = { .notifier_call = smc_pnet_netdev_event }; /* init network namespace */ int smc_pnet_net_init(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnettable *pnettable = &sn->pnettable; struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); smc_pnet_create_pnetids_list(net); /* disable handshake limitation by default */ net->smc.limit_smc_hs = 0; return 0; } int __init smc_pnet_init(void) { int rc; rc = genl_register_family(&smc_pnet_nl_family); if (rc) return rc; rc = register_netdevice_notifier(&smc_netdev_notifier); if (rc) genl_unregister_family(&smc_pnet_nl_family); return rc; } /* exit network namespace */ void smc_pnet_net_exit(struct net *net) { /* flush pnet table */ smc_pnet_remove_by_pnetid(net, NULL); smc_pnet_destroy_pnetids_list(net); } void smc_pnet_exit(void) { unregister_netdevice_notifier(&smc_netdev_notifier); genl_unregister_family(&smc_pnet_nl_family); } static struct net_device *__pnet_find_base_ndev(struct net_device *ndev) { int i, nest_lvl; ASSERT_RTNL(); nest_lvl = ndev->lower_level; for (i = 0; i < nest_lvl; i++) { struct list_head *lower = &ndev->adj_list.lower; if (list_empty(lower)) break; lower = lower->next; ndev = netdev_lower_get_next(ndev, &lower); } return ndev; } /* Determine one base device for stacked net devices. 
* If the lower device level contains more than one devices * (for instance with bonding slaves), just the first device * is used to reach a base device. */ static struct net_device *pnet_find_base_ndev(struct net_device *ndev) { rtnl_lock(); ndev = __pnet_find_base_ndev(ndev); rtnl_unlock(); return ndev; } static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, u8 *pnetid) { struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_pnetentry *pnetelem; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ memcpy(pnetid, pnetelem->pnet_name, SMC_MAX_PNETID_LEN); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i, struct smc_init_info *ini) { if (!ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->ib_gid, NULL, NULL)) { ini->ib_dev = ibdev; ini->ib_port = i; return 0; } if (ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->smcrv2.ib_gid_v2, NULL, &ini->smcrv2)) { ini->smcrv2.ib_dev_v2 = ibdev; ini->smcrv2.ib_port_v2 = i; return 0; } return -ENODEV; } /* find a roce device for the given pnetid */ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, struct smc_init_info *ini, struct smc_ib_device *known_dev, struct net *net) { struct smc_ib_device *ibdev; int i; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (ibdev == known_dev || !rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; if (smc_pnet_match(ibdev->pnetid[i - 1], pnet_id) && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) goto out; } } } out: mutex_unlock(&smc_ib_devices.mutex); } /* find alternate roce device with same pnet_id, vlan_id and net namespace */ void smc_pnet_find_alt_roce(struct smc_link_group *lgr, struct smc_init_info *ini, struct smc_ib_device *known_dev) { struct net *net = lgr->net; _smc_pnet_find_roce_by_pnetid(lgr->pnet_id, ini, known_dev, net); } /* if handshake network device belongs to a roce device, return its * IB device and port */ static void smc_pnet_find_rdma_dev(struct net_device *netdev, struct smc_init_info *ini) { struct net *net = dev_net(netdev); struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { struct net_device *ndev; int i; /* check rdma net namespace */ if (!rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; if (!ibdev->ibdev->ops.get_netdev) continue; ndev = ibdev->ibdev->ops.get_netdev(ibdev->ibdev, i); if (!ndev) continue; dev_put(ndev); if (netdev == ndev && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) break; } } } mutex_unlock(&smc_ib_devices.mutex); } /* Determine the corresponding IB device port based on the hardware PNETID. * Searching stops at the first matching active IB device port with vlan_id * configured. * If nothing found, check pnetid table. 
* If nothing found, try to use handshake device */ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net *net; ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { smc_pnet_find_rdma_dev(ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); } static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct smcd_dev *ismdev; ndev = pnet_find_base_ndev(ndev); if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) return; /* pnetid could not be determined */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(ismdev, &smcd_dev_list.list, list) { if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) && !ismdev->going_away && (!ini->ism_peer_gid[0] || !smc_ism_cantalk(ini->ism_peer_gid[0], ini->vlan_id, ismdev))) { ini->ism_dev[0] = ismdev; break; } } mutex_unlock(&smcd_dev_list.mutex); } /* PNET table analysis for a given sock: * determine ib_device and port belonging to used internal TCP socket * ethernet interface. */ void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(sk); if (!dst) goto out; if (!dst->dev) goto out_rel; smc_pnet_find_roce_by_pnetid(dst->dev, ini); out_rel: dst_release(dst); out: return; } void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(sk); ini->ism_dev[0] = NULL; if (!dst) goto out; if (!dst->dev) goto out_rel; smc_pnet_find_ism_by_pnetid(dst->dev, ini); out_rel: dst_release(dst); out: return; } /* Lookup and apply a pnet table entry to the given ib device. */ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) { char *ib_name = smcibdev->ibdev->name; struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && tmp_pe->ib_port == ib_port) { smc_pnet_apply_ib(smcibdev, ib_port, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } /* Lookup and apply a pnet table entry to the given smcd device. */ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) { const char *ib_name = dev_name(smcddev->ops->get_dev(smcddev)); struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { smc_pnet_apply_smcd(smcddev, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; }
linux-master
net/smc/smc_pnet.c
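A minimal userspace sketch of the PNETID normalization rule that smc_pnetid_valid() in smc_pnet.c above enforces (at most 16 characters, alphanumeric only, lower case folded to upper case, surrounding blanks ignored). PNETID_MAX and pnetid_normalize() are illustrative names only and are not part of the kernel sources.

/* Illustrative userspace re-implementation, assuming the 16-character limit
 * stated in the smc_pnetid_valid() comment; not kernel code. */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PNETID_MAX 16				/* mirrors SMC_MAX_PNETID_LEN */

static bool pnetid_normalize(const char *in, char out[PNETID_MAX + 1])
{
	const char *start = in, *end;

	while (*start && isspace((unsigned char)*start))	/* skip leading blanks */
		start++;
	end = start + strlen(start);
	while (end > start && isspace((unsigned char)end[-1]))	/* drop trailing blanks */
		end--;
	if (end == start || end - start > PNETID_MAX)
		return false;
	for (; start < end; start++) {
		if (!isalnum((unsigned char)*start))		/* interior blanks etc. are invalid */
			return false;
		*out++ = toupper((unsigned char)*start);	/* fold to upper case */
	}
	*out = '\0';
	return true;
}

int main(void)
{
	char pnetid[PNETID_MAX + 1];

	if (pnetid_normalize("  net1  ", pnetid))
		printf("normalized PNETID: %s\n", pnetid);	/* prints NET1 */
	return 0;
}

In the kernel path, smc_pnet_enter() runs the equivalent check on the SMC_PNETID_NAME netlink attribute and returns -EINVAL when it fails, so no invalid identifier ever reaches the pnet table.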
// SPDX-License-Identifier: GPL-2.0-only

#define CREATE_TRACE_POINTS
#include "smc_tracepoint.h"

EXPORT_TRACEPOINT_SYMBOL(smc_switch_to_fallback);
EXPORT_TRACEPOINT_SYMBOL(smc_tx_sendmsg);
EXPORT_TRACEPOINT_SYMBOL(smc_rx_recvmsg);
EXPORT_TRACEPOINT_SYMBOL(smcr_link_down);
linux-master
net/smc/smc_tracepoint.c
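smc_tracepoint.c above is the single translation unit that defines CREATE_TRACE_POINTS before including its trace header, which is the standard kernel tracepoint idiom. A generic sketch of that idiom follows; the header name hypothetical_trace.h and the event hypothetical_event are made-up illustrations, not files in the tree.

/* hypothetical_trace.h -- note the TRACE_HEADER_MULTI_READ guard: the header
 * is included a second time by <trace/define_trace.h> so the event
 * definitions can be expanded into code. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hypothetical

#if !defined(_HYPOTHETICAL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _HYPOTHETICAL_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(hypothetical_event,
	    TP_PROTO(int value),
	    TP_ARGS(value),
	    TP_STRUCT__entry(__field(int, value)),
	    TP_fast_assign(__entry->value = value;),
	    TP_printk("value=%d", __entry->value)
);

#endif /* _HYPOTHETICAL_TRACE_H */

/* must stay at the end of the header */
#include <trace/define_trace.h>

/* hypothetical_trace.c -- exactly one .c file instantiates the tracepoints,
 * just as smc_tracepoint.c does for the SMC events; every other file only
 * includes the header and calls trace_hypothetical_event(). */
#define CREATE_TRACE_POINTS
#include "hypothetical_trace.h"

The EXPORT_TRACEPOINT_SYMBOL() lines in smc_tracepoint.c additionally make those tracepoint symbols usable from other modules.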
// SPDX-License-Identifier: GPL-2.0 /* Shared Memory Communications Direct over ISM devices (SMC-D) * * Functions for ISM device. * * Copyright IBM Corp. 2018 */ #include <linux/if_vlan.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/page.h> #include "smc.h" #include "smc_core.h" #include "smc_ism.h" #include "smc_pnet.h" #include "smc_netlink.h" #include "linux/ism.h" struct smcd_dev_list smcd_dev_list = { .list = LIST_HEAD_INIT(smcd_dev_list.list), .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex) }; static bool smc_ism_v2_capable; static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN]; #if IS_ENABLED(CONFIG_ISM) static void smcd_register_dev(struct ism_dev *ism); static void smcd_unregister_dev(struct ism_dev *ism); static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event); static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno, u16 dmbemask); static struct ism_client smc_ism_client = { .name = "SMC-D", .add = smcd_register_dev, .remove = smcd_unregister_dev, .handle_event = smcd_handle_event, .handle_irq = smcd_handle_irq, }; #endif /* Test if an ISM communication is possible - same CPC */ int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd) { return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0, vlan_id); } void smc_ism_get_system_eid(u8 **eid) { if (!smc_ism_v2_capable) *eid = NULL; else *eid = smc_ism_v2_system_eid; } u16 smc_ism_get_chid(struct smcd_dev *smcd) { return smcd->ops->get_chid(smcd); } /* HW supports ISM V2 and thus System EID is defined */ bool smc_ism_is_v2_capable(void) { return smc_ism_v2_capable; } /* Set a connection using this DMBE. */ void smc_ism_set_conn(struct smc_connection *conn) { unsigned long flags; spin_lock_irqsave(&conn->lgr->smcd->lock, flags); conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn; spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); } /* Unset a connection using this DMBE. */ void smc_ism_unset_conn(struct smc_connection *conn) { unsigned long flags; if (!conn->rmb_desc) return; spin_lock_irqsave(&conn->lgr->smcd->lock, flags); conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL; spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); } /* Register a VLAN identifier with the ISM device. Use a reference count * and add a VLAN identifier only when the first DMB using this VLAN is * registered. */ int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid) { struct smc_ism_vlanid *new_vlan, *vlan; unsigned long flags; int rc = 0; if (!vlanid) /* No valid vlan id */ return -EINVAL; /* create new vlan entry, in case we need it */ new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL); if (!new_vlan) return -ENOMEM; new_vlan->vlanid = vlanid; refcount_set(&new_vlan->refcnt, 1); /* if there is an existing entry, increase count and return */ spin_lock_irqsave(&smcd->lock, flags); list_for_each_entry(vlan, &smcd->vlan, list) { if (vlan->vlanid == vlanid) { refcount_inc(&vlan->refcnt); kfree(new_vlan); goto out; } } /* no existing entry found. * add new entry to device; might fail, e.g., if HW limit reached */ if (smcd->ops->add_vlan_id(smcd, vlanid)) { kfree(new_vlan); rc = -EIO; goto out; } list_add_tail(&new_vlan->list, &smcd->vlan); out: spin_unlock_irqrestore(&smcd->lock, flags); return rc; } /* Unregister a VLAN identifier with the ISM device. Use a reference count * and remove a VLAN identifier only when the last DMB using this VLAN is * unregistered. 
*/ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid) { struct smc_ism_vlanid *vlan; unsigned long flags; bool found = false; int rc = 0; if (!vlanid) /* No valid vlan id */ return -EINVAL; spin_lock_irqsave(&smcd->lock, flags); list_for_each_entry(vlan, &smcd->vlan, list) { if (vlan->vlanid == vlanid) { if (!refcount_dec_and_test(&vlan->refcnt)) goto out; found = true; break; } } if (!found) { rc = -ENOENT; goto out; /* VLAN id not in table */ } /* Found and the last reference just gone */ if (smcd->ops->del_vlan_id(smcd, vlanid)) rc = -EIO; list_del(&vlan->list); kfree(vlan); out: spin_unlock_irqrestore(&smcd->lock, flags); return rc; } int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc) { struct smcd_dmb dmb; int rc = 0; if (!dmb_desc->dma_addr) return rc; memset(&dmb, 0, sizeof(dmb)); dmb.dmb_tok = dmb_desc->token; dmb.sba_idx = dmb_desc->sba_idx; dmb.cpu_addr = dmb_desc->cpu_addr; dmb.dma_addr = dmb_desc->dma_addr; dmb.dmb_len = dmb_desc->len; rc = smcd->ops->unregister_dmb(smcd, &dmb); if (!rc || rc == ISM_ERROR) { dmb_desc->cpu_addr = NULL; dmb_desc->dma_addr = 0; } return rc; } int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len, struct smc_buf_desc *dmb_desc) { #if IS_ENABLED(CONFIG_ISM) struct smcd_dmb dmb; int rc; memset(&dmb, 0, sizeof(dmb)); dmb.dmb_len = dmb_len; dmb.sba_idx = dmb_desc->sba_idx; dmb.vlan_id = lgr->vlan_id; dmb.rgid = lgr->peer_gid; rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, &smc_ism_client); if (!rc) { dmb_desc->sba_idx = dmb.sba_idx; dmb_desc->token = dmb.dmb_tok; dmb_desc->cpu_addr = dmb.cpu_addr; dmb_desc->dma_addr = dmb.dma_addr; dmb_desc->len = dmb.dmb_len; } return rc; #else return 0; #endif } static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd, struct sk_buff *skb, struct netlink_callback *cb) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct smc_pci_dev smc_pci_dev; struct nlattr *port_attrs; struct nlattr *attrs; struct ism_dev *ism; int use_cnt = 0; void *nlh; ism = smcd->priv; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_DEV_SMCD); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD); if (!attrs) goto errout; use_cnt = atomic_read(&smcd->lgr_cnt); if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt)) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0)) goto errattr; memset(&smc_pci_dev, 0, sizeof(smc_pci_dev)); smc_set_pci_values(to_pci_dev(ism->dev.parent), &smc_pci_dev); if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device)) goto errattr; if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id)) goto errattr; port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT); if (!port_attrs) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user)) goto errportattr; memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet)) goto errportattr; nla_nest_end(skb, port_attrs); nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errportattr: nla_nest_cancel(skb, port_attrs); errattr: nla_nest_cancel(skb, attrs); errout: nlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_prep_smcd_dev(struct 
smcd_dev_list *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); int snum = cb_ctx->pos[0]; struct smcd_dev *smcd; int num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcd, &dev_list->list, list) { if (num < snum) goto next; if (smc_nl_handle_smcd_dev(smcd, skb, cb)) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; } int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb); return skb->len; } #if IS_ENABLED(CONFIG_ISM) struct smc_ism_event_work { struct work_struct work; struct smcd_dev *smcd; struct ism_event event; }; #define ISM_EVENT_REQUEST 0x0001 #define ISM_EVENT_RESPONSE 0x0002 #define ISM_EVENT_REQUEST_IR 0x00000001 #define ISM_EVENT_CODE_SHUTDOWN 0x80 #define ISM_EVENT_CODE_TESTLINK 0x83 union smcd_sw_event_info { u64 info; struct { u8 uid[SMC_LGR_ID_SIZE]; unsigned short vlan_id; u16 code; }; }; static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) { union smcd_sw_event_info ev_info; ev_info.info = wrk->event.info; switch (wrk->event.code) { case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */ smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); break; case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ if (ev_info.code == ISM_EVENT_REQUEST) { ev_info.code = ISM_EVENT_RESPONSE; wrk->smcd->ops->signal_event(wrk->smcd, wrk->event.tok, ISM_EVENT_REQUEST_IR, ISM_EVENT_CODE_TESTLINK, ev_info.info); } break; } } /* worker for SMC-D events */ static void smc_ism_event_work(struct work_struct *work) { struct smc_ism_event_work *wrk = container_of(work, struct smc_ism_event_work, work); switch (wrk->event.type) { case ISM_EVENT_GID: /* GID event, token is peer GID */ smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); break; case ISM_EVENT_DMB: break; case ISM_EVENT_SWR: /* Software defined event */ smcd_handle_sw_event(wrk); break; } kfree(wrk); } static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, const struct smcd_ops *ops, int max_dmbs) { struct smcd_dev *smcd; smcd = devm_kzalloc(parent, sizeof(*smcd), GFP_KERNEL); if (!smcd) return NULL; smcd->conn = devm_kcalloc(parent, max_dmbs, sizeof(struct smc_connection *), GFP_KERNEL); if (!smcd->conn) return NULL; smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", WQ_MEM_RECLAIM, name); if (!smcd->event_wq) return NULL; smcd->ops = ops; spin_lock_init(&smcd->lock); spin_lock_init(&smcd->lgr_lock); INIT_LIST_HEAD(&smcd->vlan); INIT_LIST_HEAD(&smcd->lgr_list); init_waitqueue_head(&smcd->lgrs_deleted); return smcd; } static void smcd_register_dev(struct ism_dev *ism) { const struct smcd_ops *ops = ism_get_smcd_ops(); struct smcd_dev *smcd; if (!ops) return; smcd = smcd_alloc_dev(&ism->pdev->dev, dev_name(&ism->pdev->dev), ops, ISM_NR_DMBS); if (!smcd) return; smcd->priv = ism; ism_set_priv(ism, &smc_ism_client, smcd); if (smc_pnetid_by_dev_port(&ism->pdev->dev, 0, smcd->pnetid)) smc_pnetid_by_table_smcd(smcd); mutex_lock(&smcd_dev_list.mutex); if (list_empty(&smcd_dev_list.list)) { u8 *system_eid = NULL; system_eid = smcd->ops->get_system_eid(); if (smcd->ops->supports_v2()) { smc_ism_v2_capable = true; memcpy(smc_ism_v2_system_eid, system_eid, SMC_MAX_EID_LEN); } } /* sort list: devices without pnetid before devices with pnetid */ if (smcd->pnetid[0]) list_add_tail(&smcd->list, &smcd_dev_list.list); else list_add(&smcd->list, &smcd_dev_list.list); mutex_unlock(&smcd_dev_list.mutex); 
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n", dev_name(&ism->dev), smcd->pnetid, smcd->pnetid_by_user ? " (user defined)" : ""); return; } static void smcd_unregister_dev(struct ism_dev *ism) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); pr_warn_ratelimited("smc: removing smcd device %s\n", dev_name(&ism->dev)); smcd->going_away = 1; smc_smcd_terminate_all(smcd); mutex_lock(&smcd_dev_list.mutex); list_del_init(&smcd->list); mutex_unlock(&smcd_dev_list.mutex); destroy_workqueue(smcd->event_wq); } /* SMCD Device event handler. Called from ISM device interrupt handler. * Parameters are ism device pointer, * - event->type (0 --> DMB, 1 --> GID), * - event->code (event code), * - event->tok (either DMB token when event type 0, or GID when event type 1) * - event->time (time of day) * - event->info (debug info). * * Context: * - Function called in IRQ context from ISM device driver event handler. */ static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); struct smc_ism_event_work *wrk; if (smcd->going_away) return; /* copy event to event work queue, and let it be handled there */ wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC); if (!wrk) return; INIT_WORK(&wrk->work, smc_ism_event_work); wrk->smcd = smcd; wrk->event = *event; queue_work(smcd->event_wq, &wrk->work); } /* SMCD Device interrupt handler. Called from ISM device interrupt handler. * Parameters are the ism device pointer, DMB number, and the DMBE bitmask. * Find the connection and schedule the tasklet for this connection. * * Context: * - Function called in IRQ context from ISM device driver IRQ handler. */ static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno, u16 dmbemask) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); struct smc_connection *conn = NULL; unsigned long flags; spin_lock_irqsave(&smcd->lock, flags); conn = smcd->conn[dmbno]; if (conn && !conn->killed) tasklet_schedule(&conn->rx_tsklet); spin_unlock_irqrestore(&smcd->lock, flags); } #endif int smc_ism_signal_shutdown(struct smc_link_group *lgr) { int rc = 0; #if IS_ENABLED(CONFIG_ISM) union smcd_sw_event_info ev_info; if (lgr->peer_shutdown) return 0; memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE); ev_info.vlan_id = lgr->vlan_id; ev_info.code = ISM_EVENT_REQUEST; rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid, ISM_EVENT_REQUEST_IR, ISM_EVENT_CODE_SHUTDOWN, ev_info.info); #endif return rc; } int smc_ism_init(void) { int rc = 0; #if IS_ENABLED(CONFIG_ISM) smc_ism_v2_capable = false; memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN); rc = ism_register_client(&smc_ism_client); #endif return rc; } void smc_ism_exit(void) { #if IS_ENABLED(CONFIG_ISM) ism_unregister_client(&smc_ism_client); #endif }
linux-master
net/smc/smc_ism.c
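smcd_handle_event() in smc_ism.c above runs in IRQ context, so it only copies the event into a GFP_ATOMIC work item and queues it; everything that may sleep happens later in smc_ism_event_work(). The following is a minimal sketch of that defer-to-workqueue pattern with hypothetical demo_* names; it is not code from the tree.

/* Hypothetical sketch of the IRQ-to-workqueue hand-off used by
 * smcd_handle_event()/smc_ism_event_work() above. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_event {
	u32 code;
	u64 token;
};

struct demo_event_work {
	struct work_struct work;
	struct demo_event event;	/* private copy; the IRQ data is transient */
};

static void demo_event_worker(struct work_struct *work)
{
	struct demo_event_work *wrk =
		container_of(work, struct demo_event_work, work);

	/* process context: may sleep, take mutexes, terminate link groups, ... */
	pr_info("demo event code %u token %llu\n", wrk->event.code,
		(unsigned long long)wrk->event.token);
	kfree(wrk);			/* the worker owns and frees the copy */
}

/* called from the device driver's interrupt handler */
static void demo_handle_event(struct workqueue_struct *wq,
			      const struct demo_event *event)
{
	struct demo_event_work *wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);

	if (!wrk)
		return;			/* best effort, as in smcd_handle_event() */
	INIT_WORK(&wrk->work, demo_event_worker);
	wrk->event = *event;
	queue_work(wq, &wrk->work);
}

Using an ordered workqueue for this, as smcd_alloc_dev() does with alloc_ordered_workqueue(), keeps the events of one device serialized without extra locking in the worker.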
// SPDX-License-Identifier: GPL-2.0-only /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * AF_SMC protocol family socket handler keeping the AF_INET sock address type * applies to SOCK_STREAM sockets only * offers an alternative communication option for TCP-protocol sockets * applicable with RoCE-cards only * * Initial restrictions: * - support for alternate links postponed * * Copyright IBM Corp. 2016, 2018 * * Author(s): Ursula Braun <[email protected]> * based on prototype from Frank Blaschka */ #define KMSG_COMPONENT "smc" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/socket.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/sched/signal.h> #include <linux/if_vlan.h> #include <linux/rcupdate_wait.h> #include <linux/ctype.h> #include <linux/splice.h> #include <net/sock.h> #include <net/tcp.h> #include <net/smc.h> #include <asm/ioctls.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "smc_netns.h" #include "smc.h" #include "smc_clc.h" #include "smc_llc.h" #include "smc_cdc.h" #include "smc_core.h" #include "smc_ib.h" #include "smc_ism.h" #include "smc_pnet.h" #include "smc_netlink.h" #include "smc_tx.h" #include "smc_rx.h" #include "smc_close.h" #include "smc_stats.h" #include "smc_tracepoint.h" #include "smc_sysctl.h" static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group * creation on server */ static DEFINE_MUTEX(smc_client_lgr_pending); /* serialize link group * creation on client */ static struct workqueue_struct *smc_tcp_ls_wq; /* wq for tcp listen work */ struct workqueue_struct *smc_hs_wq; /* wq for handshake work */ struct workqueue_struct *smc_close_wq; /* wq for close work */ static void smc_tcp_listen_work(struct work_struct *); static void smc_connect_work(struct work_struct *); int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); void *hdr; if (cb_ctx->pos[0]) goto out; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_DUMP_HS_LIMITATION); if (!hdr) return -ENOMEM; if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED, sock_net(skb->sk)->smc.limit_smc_hs)) goto err; genlmsg_end(skb, hdr); cb_ctx->pos[0] = 1; out: return skb->len; err: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info) { sock_net(skb->sk)->smc.limit_smc_hs = true; return 0; } int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info) { sock_net(skb->sk)->smc.limit_smc_hs = false; return 0; } static void smc_set_keepalive(struct sock *sk, int val) { struct smc_sock *smc = smc_sk(sk); smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); } static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct smc_sock *smc; struct sock *child; smc = smc_clcsock_user_data(sk); if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) > sk->sk_max_ack_backlog) goto drop; if (sk_acceptq_is_full(&smc->sk)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } /* passthrough to original syn recv sock fct */ child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); /* child must not inherit smc or its ops */ if (child) { rcu_assign_sk_user_data(child, NULL); /* v4-mapped sockets don't inherit parent 
ops. Don't restore. */ if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops) inet_csk(child)->icsk_af_ops = smc->ori_af_ops; } return child; drop: dst_release(dst); tcp_listendrop(sk); return NULL; } static bool smc_hs_congested(const struct sock *sk) { const struct smc_sock *smc; smc = smc_clcsock_user_data(sk); if (!smc) return true; if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) return true; return false; } static struct smc_hashinfo smc_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock), }; static struct smc_hashinfo smc_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock), }; int smc_hash_sk(struct sock *sk) { struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; struct hlist_head *head; head = &h->ht; write_lock_bh(&h->lock); sk_add_node(sk, head); write_unlock_bh(&h->lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); return 0; } EXPORT_SYMBOL_GPL(smc_hash_sk); void smc_unhash_sk(struct sock *sk) { struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; write_lock_bh(&h->lock); if (sk_del_node_init(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&h->lock); } EXPORT_SYMBOL_GPL(smc_unhash_sk); /* This will be called before user really release sock_lock. So do the * work which we didn't do because of user hold the sock_lock in the * BH context */ static void smc_release_cb(struct sock *sk) { struct smc_sock *smc = smc_sk(sk); if (smc->conn.tx_in_release_sock) { smc_tx_pending(&smc->conn); smc->conn.tx_in_release_sock = false; } } struct proto smc_proto = { .name = "SMC", .owner = THIS_MODULE, .keepalive = smc_set_keepalive, .hash = smc_hash_sk, .unhash = smc_unhash_sk, .release_cb = smc_release_cb, .obj_size = sizeof(struct smc_sock), .h.smc_hash = &smc_v4_hashinfo, .slab_flags = SLAB_TYPESAFE_BY_RCU, }; EXPORT_SYMBOL_GPL(smc_proto); struct proto smc_proto6 = { .name = "SMC6", .owner = THIS_MODULE, .keepalive = smc_set_keepalive, .hash = smc_hash_sk, .unhash = smc_unhash_sk, .release_cb = smc_release_cb, .obj_size = sizeof(struct smc_sock), .h.smc_hash = &smc_v6_hashinfo, .slab_flags = SLAB_TYPESAFE_BY_RCU, }; EXPORT_SYMBOL_GPL(smc_proto6); static void smc_fback_restore_callbacks(struct smc_sock *smc) { struct sock *clcsk = smc->clcsock->sk; write_lock_bh(&clcsk->sk_callback_lock); clcsk->sk_user_data = NULL; smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change); smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready); smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space); smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report); write_unlock_bh(&clcsk->sk_callback_lock); } static void smc_restore_fallback_changes(struct smc_sock *smc) { if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ smc->clcsock->file->private_data = smc->sk.sk_socket; smc->clcsock->file = NULL; smc_fback_restore_callbacks(smc); } } static int __smc_release(struct smc_sock *smc) { struct sock *sk = &smc->sk; int rc = 0; if (!smc->use_fallback) { rc = smc_close_active(smc); sock_set_flag(sk, SOCK_DEAD); sk->sk_shutdown |= SHUTDOWN_MASK; } else { if (sk->sk_state != SMC_CLOSED) { if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT) sock_put(sk); /* passive closing */ if (sk->sk_state == SMC_LISTEN) { /* wake up clcsock accept */ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); } sk->sk_state = SMC_CLOSED; sk->sk_state_change(sk); } smc_restore_fallback_changes(smc); } sk->sk_prot->unhash(sk); if (sk->sk_state == SMC_CLOSED) { if (smc->clcsock) { 
release_sock(sk); smc_clcsock_release(smc); lock_sock(sk); } if (!smc->use_fallback) smc_conn_free(&smc->conn); } return rc; } static int smc_release(struct socket *sock) { struct sock *sk = sock->sk; struct smc_sock *smc; int old_state, rc = 0; if (!sk) goto out; sock_hold(sk); /* sock_put below */ smc = smc_sk(sk); old_state = sk->sk_state; /* cleanup for a dangling non-blocking connect */ if (smc->connect_nonblock && old_state == SMC_INIT) tcp_abort(smc->clcsock->sk, ECONNABORTED); if (cancel_work_sync(&smc->connect_work)) sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */ if (sk->sk_state == SMC_LISTEN) /* smc_close_non_accepted() is called and acquires * sock lock for child sockets again */ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); else lock_sock(sk); if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE && !smc->use_fallback) smc_close_active_abort(smc); rc = __smc_release(smc); /* detach socket */ sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); /* sock_hold above */ sock_put(sk); /* final sock_put */ out: return rc; } static void smc_destruct(struct sock *sk) { if (sk->sk_state != SMC_CLOSED) return; if (!sock_flag(sk, SOCK_DEAD)) return; } static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, int protocol) { struct smc_sock *smc; struct proto *prot; struct sock *sk; prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto; sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0); if (!sk) return NULL; sock_init_data(sock, sk); /* sets sk_refcnt to 1 */ sk->sk_state = SMC_INIT; sk->sk_destruct = smc_destruct; sk->sk_protocol = protocol; WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem)); WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem)); smc = smc_sk(sk); INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); INIT_WORK(&smc->connect_work, smc_connect_work); INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); INIT_LIST_HEAD(&smc->accept_q); spin_lock_init(&smc->accept_q_lock); spin_lock_init(&smc->conn.send_lock); sk->sk_prot->hash(sk); mutex_init(&smc->clcsock_release_lock); smc_init_saved_callbacks(smc); return sk; } static int smc_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct sock *sk = sock->sk; struct smc_sock *smc; int rc; smc = smc_sk(sk); /* replicate tests from inet_bind(), to be safe wrt. 
future changes */ rc = -EINVAL; if (addr_len < sizeof(struct sockaddr_in)) goto out; rc = -EAFNOSUPPORT; if (addr->sin_family != AF_INET && addr->sin_family != AF_INET6 && addr->sin_family != AF_UNSPEC) goto out; /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */ if (addr->sin_family == AF_UNSPEC && addr->sin_addr.s_addr != htonl(INADDR_ANY)) goto out; lock_sock(sk); /* Check if socket is already active */ rc = -EINVAL; if (sk->sk_state != SMC_INIT || smc->connect_nonblock) goto out_rel; smc->clcsock->sk->sk_reuse = sk->sk_reuse; smc->clcsock->sk->sk_reuseport = sk->sk_reuseport; rc = kernel_bind(smc->clcsock, uaddr, addr_len); out_rel: release_sock(sk); out: return rc; } /* copy only relevant settings and flags of SOL_SOCKET level from smc to * clc socket (since smc is not called for these options from net/core) */ #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \ (1UL << SOCK_KEEPOPEN) | \ (1UL << SOCK_LINGER) | \ (1UL << SOCK_BROADCAST) | \ (1UL << SOCK_TIMESTAMP) | \ (1UL << SOCK_DBG) | \ (1UL << SOCK_RCVTSTAMP) | \ (1UL << SOCK_RCVTSTAMPNS) | \ (1UL << SOCK_LOCALROUTE) | \ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ (1UL << SOCK_RXQ_OVFL) | \ (1UL << SOCK_WIFI_STATUS) | \ (1UL << SOCK_NOFCS) | \ (1UL << SOCK_FILTER_LOCKED) | \ (1UL << SOCK_TSTAMP_NEW)) /* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */ static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk, unsigned long mask) { struct net *nnet = sock_net(nsk); nsk->sk_userlocks = osk->sk_userlocks; if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) { nsk->sk_sndbuf = osk->sk_sndbuf; } else { if (mask == SK_FLAGS_SMC_TO_CLC) WRITE_ONCE(nsk->sk_sndbuf, READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1])); else WRITE_ONCE(nsk->sk_sndbuf, 2 * READ_ONCE(nnet->smc.sysctl_wmem)); } if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) { nsk->sk_rcvbuf = osk->sk_rcvbuf; } else { if (mask == SK_FLAGS_SMC_TO_CLC) WRITE_ONCE(nsk->sk_rcvbuf, READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1])); else WRITE_ONCE(nsk->sk_rcvbuf, 2 * READ_ONCE(nnet->smc.sysctl_rmem)); } } static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, unsigned long mask) { /* options we don't get control via setsockopt for */ nsk->sk_type = osk->sk_type; nsk->sk_sndtimeo = osk->sk_sndtimeo; nsk->sk_rcvtimeo = osk->sk_rcvtimeo; nsk->sk_mark = READ_ONCE(osk->sk_mark); nsk->sk_priority = osk->sk_priority; nsk->sk_rcvlowat = osk->sk_rcvlowat; nsk->sk_bound_dev_if = osk->sk_bound_dev_if; nsk->sk_err = osk->sk_err; nsk->sk_flags &= ~mask; nsk->sk_flags |= osk->sk_flags & mask; smc_adjust_sock_bufsizes(nsk, osk, mask); } static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) { smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); } #define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \ (1UL << SOCK_KEEPOPEN) | \ (1UL << SOCK_LINGER) | \ (1UL << SOCK_DBG)) /* copy only settings and flags relevant for smc from clc to smc socket */ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) { smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); } /* register the new vzalloced sndbuf on all links */ static int smcr_lgr_reg_sndbufs(struct smc_link *link, struct smc_buf_desc *snd_desc) { struct smc_link_group *lgr = link->lgr; int i, rc = 0; if (!snd_desc->is_vm) return -EINVAL; /* protect against parallel smcr_link_reg_buf() */ down_write(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i])) continue; rc = smcr_link_reg_buf(&lgr->lnk[i], 
snd_desc); if (rc) break; } up_write(&lgr->llc_conf_mutex); return rc; } /* register the new rmb on all links */ static int smcr_lgr_reg_rmbs(struct smc_link *link, struct smc_buf_desc *rmb_desc) { struct smc_link_group *lgr = link->lgr; bool do_slow = false; int i, rc = 0; rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY); if (rc) return rc; down_read(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i])) continue; if (!rmb_desc->is_reg_mr[link->link_idx]) { up_read(&lgr->llc_conf_mutex); goto slow_path; } } /* mr register already */ goto fast_path; slow_path: do_slow = true; /* protect against parallel smc_llc_cli_rkey_exchange() and * parallel smcr_link_reg_buf() */ down_write(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i])) continue; rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc); if (rc) goto out; } fast_path: /* exchange confirm_rkey msg with peer */ rc = smc_llc_do_confirm_rkey(link, rmb_desc); if (rc) { rc = -EFAULT; goto out; } rmb_desc->is_conf_rkey = true; out: do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex); smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); return rc; } static int smcr_clnt_conf_first_link(struct smc_sock *smc) { struct smc_link *link = smc->conn.lnk; struct smc_llc_qentry *qentry; int rc; /* receive CONFIRM LINK request from server over RoCE fabric */ qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME, SMC_LLC_CONFIRM_LINK); if (!qentry) { struct smc_clc_msg_decline dclc; rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT); return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc; } smc_llc_save_peer_uid(qentry); rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ); smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl); if (rc) return SMC_CLC_DECL_RMBE_EC; rc = smc_ib_modify_qp_rts(link); if (rc) return SMC_CLC_DECL_ERR_RDYLNK; smc_wr_remember_qp_attr(link); /* reg the sndbuf if it was vzalloced */ if (smc->conn.sndbuf_desc->is_vm) { if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc)) return SMC_CLC_DECL_ERR_REGBUF; } /* reg the rmb */ if (smcr_link_reg_buf(link, smc->conn.rmb_desc)) return SMC_CLC_DECL_ERR_REGBUF; /* confirm_rkey is implicit on 1st contact */ smc->conn.rmb_desc->is_conf_rkey = true; /* send CONFIRM LINK response over RoCE fabric */ rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP); if (rc < 0) return SMC_CLC_DECL_TIMEOUT_CL; smc_llc_link_active(link); smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE); if (link->lgr->max_links > 1) { /* optional 2nd link, receive ADD LINK request from server */ qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK); if (!qentry) { struct smc_clc_msg_decline dclc; rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT); if (rc == -EAGAIN) rc = 0; /* no DECLINE received, go with one link */ return rc; } smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl); smc_llc_cli_add_link(link, qentry); } return 0; } static bool smc_isascii(char *hostname) { int i; for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++) if (!isascii(hostname[i])) return false; return true; } static void smc_conn_save_peer_info_fce(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { struct smc_clc_msg_accept_confirm_v2 *clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)clc; struct smc_clc_first_contact_ext *fce; int clc_v2_len; if (clc->hdr.version == SMC_V1 || !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) return; if (smc->conn.lgr->is_smcd) { 
memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid, SMC_MAX_EID_LEN); clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, d1); } else { memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid, SMC_MAX_EID_LEN); clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2, r1); } fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len); smc->conn.lgr->peer_os = fce->os_type; smc->conn.lgr->peer_smc_release = fce->release; if (smc_isascii(fce->hostname)) memcpy(smc->conn.lgr->peer_hostname, fce->hostname, SMC_MAX_HOSTNAME_LEN); } static void smcr_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size); smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx; smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token); smc->conn.peer_rmbe_size = bufsize; atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); } static void smcd_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size); smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx; smc->conn.peer_token = clc->d0.token; /* msg header takes up space in the buffer */ smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; } static void smc_conn_save_peer_info(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *clc) { if (smc->conn.lgr->is_smcd) smcd_conn_save_peer_info(smc, clc); else smcr_conn_save_peer_info(smc, clc); smc_conn_save_peer_info_fce(smc, clc); } static void smc_link_save_peer_info(struct smc_link *link, struct smc_clc_msg_accept_confirm *clc, struct smc_init_info *ini) { link->peer_qpn = ntoh24(clc->r0.qpn); memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE); memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac)); link->peer_psn = ntoh24(clc->r0.psn); link->peer_mtu = clc->r0.qp_mtu; } static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc, struct smc_stats_fback *fback_arr) { int cnt; for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) { if (fback_arr[cnt].fback_code == smc->fallback_rsn) { fback_arr[cnt].count++; break; } if (!fback_arr[cnt].fback_code) { fback_arr[cnt].fback_code = smc->fallback_rsn; fback_arr[cnt].count++; break; } } } static void smc_stat_fallback(struct smc_sock *smc) { struct net *net = sock_net(&smc->sk); mutex_lock(&net->smc.mutex_fback_rsn); if (smc->listen_smc) { smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv); net->smc.fback_rsn->srv_fback_cnt++; } else { smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt); net->smc.fback_rsn->clnt_fback_cnt++; } mutex_unlock(&net->smc.mutex_fback_rsn); } /* must be called under rcu read lock */ static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key) { struct socket_wq *wq; __poll_t flags; wq = rcu_dereference(smc->sk.sk_wq); if (!skwq_has_sleeper(wq)) return; /* wake up smc sk->sk_wq */ if (!key) { /* sk_state_change */ wake_up_interruptible_all(&wq->wait); } else { flags = key_to_poll(key); if (flags & (EPOLLIN | EPOLLOUT)) /* sk_data_ready or sk_write_space */ wake_up_interruptible_sync_poll(&wq->wait, flags); else if (flags & EPOLLERR) /* sk_error_report */ wake_up_interruptible_poll(&wq->wait, flags); } } static int smc_fback_mark_woken(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { struct smc_mark_woken *mark = 
container_of(wait, struct smc_mark_woken, wait_entry); mark->woken = true; mark->key = key; return 0; } static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk, void (*clcsock_callback)(struct sock *sk)) { struct smc_mark_woken mark = { .woken = false }; struct socket_wq *wq; init_waitqueue_func_entry(&mark.wait_entry, smc_fback_mark_woken); rcu_read_lock(); wq = rcu_dereference(clcsk->sk_wq); if (!wq) goto out; add_wait_queue(sk_sleep(clcsk), &mark.wait_entry); clcsock_callback(clcsk); remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry); if (mark.woken) smc_fback_wakeup_waitqueue(smc, mark.key); out: rcu_read_unlock(); } static void smc_fback_state_change(struct sock *clcsk) { struct smc_sock *smc; read_lock_bh(&clcsk->sk_callback_lock); smc = smc_clcsock_user_data(clcsk); if (smc) smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change); read_unlock_bh(&clcsk->sk_callback_lock); } static void smc_fback_data_ready(struct sock *clcsk) { struct smc_sock *smc; read_lock_bh(&clcsk->sk_callback_lock); smc = smc_clcsock_user_data(clcsk); if (smc) smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready); read_unlock_bh(&clcsk->sk_callback_lock); } static void smc_fback_write_space(struct sock *clcsk) { struct smc_sock *smc; read_lock_bh(&clcsk->sk_callback_lock); smc = smc_clcsock_user_data(clcsk); if (smc) smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space); read_unlock_bh(&clcsk->sk_callback_lock); } static void smc_fback_error_report(struct sock *clcsk) { struct smc_sock *smc; read_lock_bh(&clcsk->sk_callback_lock); smc = smc_clcsock_user_data(clcsk); if (smc) smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report); read_unlock_bh(&clcsk->sk_callback_lock); } static void smc_fback_replace_callbacks(struct smc_sock *smc) { struct sock *clcsk = smc->clcsock->sk; write_lock_bh(&clcsk->sk_callback_lock); clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change, &smc->clcsk_state_change); smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready, &smc->clcsk_data_ready); smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space, &smc->clcsk_write_space); smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report, &smc->clcsk_error_report); write_unlock_bh(&clcsk->sk_callback_lock); } static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code) { int rc = 0; mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { rc = -EBADF; goto out; } smc->use_fallback = true; smc->fallback_rsn = reason_code; smc_stat_fallback(smc); trace_smc_switch_to_fallback(smc, reason_code); if (smc->sk.sk_socket && smc->sk.sk_socket->file) { smc->clcsock->file = smc->sk.sk_socket->file; smc->clcsock->file->private_data = smc->clcsock; smc->clcsock->wq.fasync_list = smc->sk.sk_socket->wq.fasync_list; /* There might be some wait entries remaining * in smc sk->sk_wq and they should be woken up * as clcsock's wait queue is woken up. 
*/ smc_fback_replace_callbacks(smc); } out: mutex_unlock(&smc->clcsock_release_lock); return rc; } /* fall back during connect */ static int smc_connect_fallback(struct smc_sock *smc, int reason_code) { struct net *net = sock_net(&smc->sk); int rc = 0; rc = smc_switch_to_fallback(smc, reason_code); if (rc) { /* fallback fails */ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); if (smc->sk.sk_state == SMC_INIT) sock_put(&smc->sk); /* passive closing */ return rc; } smc_copy_sock_settings_to_clc(smc); smc->connect_nonblock = 0; if (smc->sk.sk_state == SMC_INIT) smc->sk.sk_state = SMC_ACTIVE; return 0; } /* decline and fall back during connect */ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code, u8 version) { struct net *net = sock_net(&smc->sk); int rc; if (reason_code < 0) { /* error, fallback is not possible */ this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); if (smc->sk.sk_state == SMC_INIT) sock_put(&smc->sk); /* passive closing */ return reason_code; } if (reason_code != SMC_CLC_DECL_PEERDECL) { rc = smc_clc_send_decline(smc, reason_code, version); if (rc < 0) { this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); if (smc->sk.sk_state == SMC_INIT) sock_put(&smc->sk); /* passive closing */ return rc; } } return smc_connect_fallback(smc, reason_code); } static void smc_conn_abort(struct smc_sock *smc, int local_first) { struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; bool lgr_valid = false; if (smc_conn_lgr_valid(conn)) lgr_valid = true; smc_conn_free(conn); if (local_first && lgr_valid) smc_lgr_cleanup_early(lgr); } /* check if there is a rdma device available for this connection. */ /* called for connect and listen */ static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini) { /* PNET table look up: search active ib_device and port * within same PNETID that also contains the ethernet device * used for the internal TCP socket */ smc_pnet_find_roce_resource(smc->clcsock->sk, ini); if (!ini->check_smcrv2 && !ini->ib_dev) return SMC_CLC_DECL_NOSMCRDEV; if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2) return SMC_CLC_DECL_NOSMCRDEV; return 0; } /* check if there is an ISM device available for this connection. */ /* called for connect and listen */ static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini) { /* Find ISM device with same PNETID as connecting interface */ smc_pnet_find_ism_resource(smc->clcsock->sk, ini); if (!ini->ism_dev[0]) return SMC_CLC_DECL_NOSMCDDEV; else ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]); return 0; } /* is chid unique for the ism devices that are already determined? */ static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini, int cnt) { int i = (!ini->ism_dev[0]) ? 
1 : 0; for (; i < cnt; i++) if (ini->ism_chid[i] == chid) return false; return true; } /* determine possible V2 ISM devices (either without PNETID or with PNETID plus * PNETID matching net_device) */ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc, struct smc_init_info *ini) { int rc = SMC_CLC_DECL_NOSMCDDEV; struct smcd_dev *smcd; int i = 1; u16 chid; if (smcd_indicated(ini->smc_type_v1)) rc = 0; /* already initialized for V1 */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { if (smcd->going_away || smcd == ini->ism_dev[0]) continue; chid = smc_ism_get_chid(smcd); if (!smc_find_ism_v2_is_unique_chid(chid, ini, i)) continue; if (!smc_pnet_is_pnetid_set(smcd->pnetid) || smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) { ini->ism_dev[i] = smcd; ini->ism_chid[i] = chid; ini->is_smcd = true; rc = 0; i++; if (i > SMC_MAX_ISM_DEVS) break; } } mutex_unlock(&smcd_dev_list.mutex); ini->ism_offered_cnt = i - 1; if (!ini->ism_dev[0] && !ini->ism_dev[1]) ini->smcd_version = 0; return rc; } /* Check for VLAN ID and register it on ISM device just for CLC handshake */ static int smc_connect_ism_vlan_setup(struct smc_sock *smc, struct smc_init_info *ini) { if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id)) return SMC_CLC_DECL_ISMVLANERR; return 0; } static int smc_find_proposal_devices(struct smc_sock *smc, struct smc_init_info *ini) { int rc = 0; /* check if there is an ism device available */ if (!(ini->smcd_version & SMC_V1) || smc_find_ism_device(smc, ini) || smc_connect_ism_vlan_setup(smc, ini)) ini->smcd_version &= ~SMC_V1; /* else ISM V1 is supported for this connection */ /* check if there is an rdma device available */ if (!(ini->smcr_version & SMC_V1) || smc_find_rdma_device(smc, ini)) ini->smcr_version &= ~SMC_V1; /* else RDMA is supported for this connection */ ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1, ini->smcr_version & SMC_V1); /* check if there is an ism v2 device available */ if (!(ini->smcd_version & SMC_V2) || !smc_ism_is_v2_capable() || smc_find_ism_v2_device_clnt(smc, ini)) ini->smcd_version &= ~SMC_V2; /* check if there is an rdma v2 device available */ ini->check_smcrv2 = true; ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr; if (!(ini->smcr_version & SMC_V2) || smc->clcsock->sk->sk_family != AF_INET || !smc_clc_ueid_count() || smc_find_rdma_device(smc, ini)) ini->smcr_version &= ~SMC_V2; ini->check_smcrv2 = false; ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2, ini->smcr_version & SMC_V2); /* if neither ISM nor RDMA are supported, fallback */ if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N) rc = SMC_CLC_DECL_NOSMCDEV; return rc; } /* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is * used, the VLAN ID will be registered again during the connection setup. 
*/ static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, struct smc_init_info *ini) { if (!smcd_indicated(ini->smc_type_v1)) return 0; if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id)) return SMC_CLC_DECL_CNFERR; return 0; } #define SMC_CLC_MAX_ACCEPT_LEN \ (sizeof(struct smc_clc_msg_accept_confirm_v2) + \ sizeof(struct smc_clc_first_contact_ext_v2x) + \ sizeof(struct smc_clc_msg_trail)) /* CLC handshake during connect */ static int smc_connect_clc(struct smc_sock *smc, struct smc_clc_msg_accept_confirm_v2 *aclc2, struct smc_init_info *ini) { int rc = 0; /* do inband token exchange */ rc = smc_clc_send_proposal(smc, ini); if (rc) return rc; /* receive SMC Accept CLC message */ return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN, SMC_CLC_ACCEPT, CLC_WAIT_TIME); } void smc_fill_gid_list(struct smc_link_group *lgr, struct smc_gidlist *gidlist, struct smc_ib_device *known_dev, u8 *known_gid) { struct smc_init_info *alt_ini = NULL; memset(gidlist, 0, sizeof(*gidlist)); memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE); alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL); if (!alt_ini) goto out; alt_ini->vlan_id = lgr->vlan_id; alt_ini->check_smcrv2 = true; alt_ini->smcrv2.saddr = lgr->saddr; smc_pnet_find_alt_roce(lgr, alt_ini, known_dev); if (!alt_ini->smcrv2.ib_dev_v2) goto out; memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2, SMC_GID_SIZE); out: kfree(alt_ini); } static int smc_connect_rdma_v2_prepare(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, struct smc_init_info *ini) { struct smc_clc_msg_accept_confirm_v2 *clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)aclc; struct smc_clc_first_contact_ext *fce = smc_get_clc_first_contact_ext(clc_v2, false); int rc; if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1) return 0; if (fce->v2_direct) { memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN); ini->smcrv2.uses_gateway = false; } else { if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr, smc_ib_gid_to_ipv4(aclc->r0.lcl.gid), ini->smcrv2.nexthop_mac, &ini->smcrv2.uses_gateway)) return SMC_CLC_DECL_NOROUTE; if (!ini->smcrv2.uses_gateway) { /* mismatch: peer claims indirect, but its direct */ return SMC_CLC_DECL_NOINDIRECT; } } ini->release_nr = fce->release; rc = smc_clc_clnt_v2x_features_validate(fce, ini); if (rc) return rc; return 0; } /* setup for RDMA connection of client */ static int smc_connect_rdma(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, struct smc_init_info *ini) { int i, reason_code = 0; struct smc_link *link; u8 *eid = NULL; ini->is_smcd = false; ini->ib_clcqpn = ntoh24(aclc->r0.qpn); ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK; memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN); memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE); memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN); ini->max_conns = SMC_CONN_PER_LGR_MAX; ini->max_links = SMC_LINKS_ADD_LNK_MAX; reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini); if (reason_code) return reason_code; mutex_lock(&smc_client_lgr_pending); reason_code = smc_conn_create(smc, ini); if (reason_code) { mutex_unlock(&smc_client_lgr_pending); return reason_code; } smc_conn_save_peer_info(smc, aclc); if (ini->first_contact_local) { link = smc->conn.lnk; } else { /* set link that was assigned by server */ link = NULL; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *l = &smc->conn.lgr->lnk[i]; if (l->peer_qpn == ntoh24(aclc->r0.qpn) && !memcmp(l->peer_gid, 
&aclc->r0.lcl.gid, SMC_GID_SIZE) && (aclc->hdr.version > SMC_V1 || !memcmp(l->peer_mac, &aclc->r0.lcl.mac, sizeof(l->peer_mac)))) { link = l; break; } } if (!link) { reason_code = SMC_CLC_DECL_NOSRVLINK; goto connect_abort; } smc_switch_link_and_count(&smc->conn, link); } /* create send buffer and rmb */ if (smc_buf_create(smc, false)) { reason_code = SMC_CLC_DECL_MEM; goto connect_abort; } if (ini->first_contact_local) smc_link_save_peer_info(link, aclc, ini); if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) { reason_code = SMC_CLC_DECL_ERR_RTOK; goto connect_abort; } smc_close_init(smc); smc_rx_init(smc); if (ini->first_contact_local) { if (smc_ib_ready_link(link)) { reason_code = SMC_CLC_DECL_ERR_RDYLNK; goto connect_abort; } } else { /* reg sendbufs if they were vzalloced */ if (smc->conn.sndbuf_desc->is_vm) { if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) { reason_code = SMC_CLC_DECL_ERR_REGBUF; goto connect_abort; } } if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) { reason_code = SMC_CLC_DECL_ERR_REGBUF; goto connect_abort; } } if (aclc->hdr.version > SMC_V1) { struct smc_clc_msg_accept_confirm_v2 *clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)aclc; eid = clc_v2->r1.eid; if (ini->first_contact_local) smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist, link->smcibdev, link->gid); } reason_code = smc_clc_send_confirm(smc, ini->first_contact_local, aclc->hdr.version, eid, ini); if (reason_code) goto connect_abort; smc_tx_init(smc); if (ini->first_contact_local) { /* QP confirmation over RoCE fabric */ smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK); reason_code = smcr_clnt_conf_first_link(smc); smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl); if (reason_code) goto connect_abort; } mutex_unlock(&smc_client_lgr_pending); smc_copy_sock_settings_to_clc(smc); smc->connect_nonblock = 0; if (smc->sk.sk_state == SMC_INIT) smc->sk.sk_state = SMC_ACTIVE; return 0; connect_abort: smc_conn_abort(smc, ini->first_contact_local); mutex_unlock(&smc_client_lgr_pending); smc->connect_nonblock = 0; return reason_code; } /* The server has chosen one of the proposed ISM devices for the communication. * Determine from the CHID of the received CLC ACCEPT the ISM device chosen. */ static int smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc, struct smc_init_info *ini) { int i; for (i = 0; i < ini->ism_offered_cnt + 1; i++) { if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) { ini->ism_selected = i; return 0; } } return -EPROTO; } /* setup for ISM connection of client */ static int smc_connect_ism(struct smc_sock *smc, struct smc_clc_msg_accept_confirm *aclc, struct smc_init_info *ini) { u8 *eid = NULL; int rc = 0; ini->is_smcd = true; ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK; if (aclc->hdr.version == SMC_V2) { struct smc_clc_msg_accept_confirm_v2 *aclc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)aclc; if (ini->first_contact_peer) { struct smc_clc_first_contact_ext *fce = smc_get_clc_first_contact_ext(aclc_v2, true); ini->release_nr = fce->release; rc = smc_clc_clnt_v2x_features_validate(fce, ini); if (rc) return rc; } rc = smc_v2_determine_accepted_chid(aclc_v2, ini); if (rc) return rc; } ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid; /* there is only one lgr role for SMC-D; use server lock */ mutex_lock(&smc_server_lgr_pending); rc = smc_conn_create(smc, ini); if (rc) { mutex_unlock(&smc_server_lgr_pending); return rc; } /* Create send and receive buffers */ rc = smc_buf_create(smc, true); if (rc) { rc = (rc == -ENOSPC) ? 
SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM; goto connect_abort; } smc_conn_save_peer_info(smc, aclc); smc_close_init(smc); smc_rx_init(smc); smc_tx_init(smc); if (aclc->hdr.version > SMC_V1) { struct smc_clc_msg_accept_confirm_v2 *clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)aclc; eid = clc_v2->d1.eid; } rc = smc_clc_send_confirm(smc, ini->first_contact_local, aclc->hdr.version, eid, ini); if (rc) goto connect_abort; mutex_unlock(&smc_server_lgr_pending); smc_copy_sock_settings_to_clc(smc); smc->connect_nonblock = 0; if (smc->sk.sk_state == SMC_INIT) smc->sk.sk_state = SMC_ACTIVE; return 0; connect_abort: smc_conn_abort(smc, ini->first_contact_local); mutex_unlock(&smc_server_lgr_pending); smc->connect_nonblock = 0; return rc; } /* check if received accept type and version matches a proposed one */ static int smc_connect_check_aclc(struct smc_init_info *ini, struct smc_clc_msg_accept_confirm *aclc) { if (aclc->hdr.typev1 != SMC_TYPE_R && aclc->hdr.typev1 != SMC_TYPE_D) return SMC_CLC_DECL_MODEUNSUPP; if (aclc->hdr.version >= SMC_V2) { if ((aclc->hdr.typev1 == SMC_TYPE_R && !smcr_indicated(ini->smc_type_v2)) || (aclc->hdr.typev1 == SMC_TYPE_D && !smcd_indicated(ini->smc_type_v2))) return SMC_CLC_DECL_MODEUNSUPP; } else { if ((aclc->hdr.typev1 == SMC_TYPE_R && !smcr_indicated(ini->smc_type_v1)) || (aclc->hdr.typev1 == SMC_TYPE_D && !smcd_indicated(ini->smc_type_v1))) return SMC_CLC_DECL_MODEUNSUPP; } return 0; } /* perform steps before actually connecting */ static int __smc_connect(struct smc_sock *smc) { u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1; struct smc_clc_msg_accept_confirm_v2 *aclc2; struct smc_clc_msg_accept_confirm *aclc; struct smc_init_info *ini = NULL; u8 *buf = NULL; int rc = 0; if (smc->use_fallback) return smc_connect_fallback(smc, smc->fallback_rsn); /* if peer has not signalled SMC-capability, fall back */ if (!tcp_sk(smc->clcsock->sk)->syn_smc) return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC); /* IPSec connections opt out of SMC optimizations */ if (using_ipsec(smc)) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC, version); ini = kzalloc(sizeof(*ini), GFP_KERNEL); if (!ini) return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM, version); ini->smcd_version = SMC_V1 | SMC_V2; ini->smcr_version = SMC_V1 | SMC_V2; ini->smc_type_v1 = SMC_TYPE_B; ini->smc_type_v2 = SMC_TYPE_B; /* get vlan id from IP device */ if (smc_vlan_by_tcpsk(smc->clcsock, ini)) { ini->smcd_version &= ~SMC_V1; ini->smcr_version = 0; ini->smc_type_v1 = SMC_TYPE_N; if (!ini->smcd_version) { rc = SMC_CLC_DECL_GETVLANERR; goto fallback; } } rc = smc_find_proposal_devices(smc, ini); if (rc) goto fallback; buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL); if (!buf) { rc = SMC_CLC_DECL_MEM; goto fallback; } aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf; aclc = (struct smc_clc_msg_accept_confirm *)aclc2; /* perform CLC handshake */ rc = smc_connect_clc(smc, aclc2, ini); if (rc) { /* -EAGAIN on timeout, see tcp_recvmsg() */ if (rc == -EAGAIN) { rc = -ETIMEDOUT; smc->sk.sk_err = ETIMEDOUT; } goto vlan_cleanup; } /* check if smc modes and versions of CLC proposal and accept match */ rc = smc_connect_check_aclc(ini, aclc); version = aclc->hdr.version == SMC_V1 ? 
SMC_V1 : SMC_V2; if (rc) goto vlan_cleanup; /* depending on previous steps, connect using rdma or ism */ if (aclc->hdr.typev1 == SMC_TYPE_R) { ini->smcr_version = version; rc = smc_connect_rdma(smc, aclc, ini); } else if (aclc->hdr.typev1 == SMC_TYPE_D) { ini->smcd_version = version; rc = smc_connect_ism(smc, aclc, ini); } if (rc) goto vlan_cleanup; SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc); smc_connect_ism_vlan_cleanup(smc, ini); kfree(buf); kfree(ini); return 0; vlan_cleanup: smc_connect_ism_vlan_cleanup(smc, ini); kfree(buf); fallback: kfree(ini); return smc_connect_decline_fallback(smc, rc, version); } static void smc_connect_work(struct work_struct *work) { struct smc_sock *smc = container_of(work, struct smc_sock, connect_work); long timeo = smc->sk.sk_sndtimeo; int rc = 0; if (!timeo) timeo = MAX_SCHEDULE_TIMEOUT; lock_sock(smc->clcsock->sk); if (smc->clcsock->sk->sk_err) { smc->sk.sk_err = smc->clcsock->sk->sk_err; } else if ((1 << smc->clcsock->sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo); if ((rc == -EPIPE) && ((1 << smc->clcsock->sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))) rc = 0; } release_sock(smc->clcsock->sk); lock_sock(&smc->sk); if (rc != 0 || smc->sk.sk_err) { smc->sk.sk_state = SMC_CLOSED; if (rc == -EPIPE || rc == -EAGAIN) smc->sk.sk_err = EPIPE; else if (rc == -ECONNREFUSED) smc->sk.sk_err = ECONNREFUSED; else if (signal_pending(current)) smc->sk.sk_err = -sock_intr_errno(timeo); sock_put(&smc->sk); /* passive closing */ goto out; } rc = __smc_connect(smc); if (rc < 0) smc->sk.sk_err = -rc; out: if (!sock_flag(&smc->sk, SOCK_DEAD)) { if (smc->sk.sk_err) { smc->sk.sk_state_change(&smc->sk); } else { /* allow polling before and after fallback decision */ smc->clcsock->sk->sk_write_space(smc->clcsock->sk); smc->sk.sk_write_space(&smc->sk); } } release_sock(&smc->sk); } static int smc_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; int rc = -EINVAL; smc = smc_sk(sk); /* separate smc parameter checking to be safe */ if (alen < sizeof(addr->sa_family)) goto out_err; if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6) goto out_err; lock_sock(sk); switch (sock->state) { default: rc = -EINVAL; goto out; case SS_CONNECTED: rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL; goto out; case SS_CONNECTING: if (sk->sk_state == SMC_ACTIVE) goto connected; break; case SS_UNCONNECTED: sock->state = SS_CONNECTING; break; } switch (sk->sk_state) { default: goto out; case SMC_CLOSED: rc = sock_error(sk) ? : -ECONNABORTED; sock->state = SS_UNCONNECTED; goto out; case SMC_ACTIVE: rc = -EISCONN; goto out; case SMC_INIT: break; } smc_copy_sock_settings_to_clc(smc); tcp_sk(smc->clcsock->sk)->syn_smc = 1; if (smc->connect_nonblock) { rc = -EALREADY; goto out; } rc = kernel_connect(smc->clcsock, addr, alen, flags); if (rc && rc != -EINPROGRESS) goto out; if (smc->use_fallback) { sock->state = rc ? 
SS_CONNECTING : SS_CONNECTED; goto out; } sock_hold(&smc->sk); /* sock put in passive closing */ if (flags & O_NONBLOCK) { if (queue_work(smc_hs_wq, &smc->connect_work)) smc->connect_nonblock = 1; rc = -EINPROGRESS; goto out; } else { rc = __smc_connect(smc); if (rc < 0) goto out; } connected: rc = 0; sock->state = SS_CONNECTED; out: release_sock(sk); out_err: return rc; } static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc) { struct socket *new_clcsock = NULL; struct sock *lsk = &lsmc->sk; struct sock *new_sk; int rc = -EINVAL; release_sock(lsk); new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol); if (!new_sk) { rc = -ENOMEM; lsk->sk_err = ENOMEM; *new_smc = NULL; lock_sock(lsk); goto out; } *new_smc = smc_sk(new_sk); mutex_lock(&lsmc->clcsock_release_lock); if (lsmc->clcsock) rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK); mutex_unlock(&lsmc->clcsock_release_lock); lock_sock(lsk); if (rc < 0 && rc != -EAGAIN) lsk->sk_err = -rc; if (rc < 0 || lsk->sk_state == SMC_CLOSED) { new_sk->sk_prot->unhash(new_sk); if (new_clcsock) sock_release(new_clcsock); new_sk->sk_state = SMC_CLOSED; sock_set_flag(new_sk, SOCK_DEAD); sock_put(new_sk); /* final */ *new_smc = NULL; goto out; } /* new clcsock has inherited the smc listen-specific sk_data_ready * function; switch it back to the original sk_data_ready function */ new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready; /* if new clcsock has also inherited the fallback-specific callback * functions, switch them back to the original ones. */ if (lsmc->use_fallback) { if (lsmc->clcsk_state_change) new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change; if (lsmc->clcsk_write_space) new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space; if (lsmc->clcsk_error_report) new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report; } (*new_smc)->clcsock = new_clcsock; out: return rc; } /* add a just created sock to the accept queue of the listen sock as * candidate for a following socket accept call from user space */ static void smc_accept_enqueue(struct sock *parent, struct sock *sk) { struct smc_sock *par = smc_sk(parent); sock_hold(sk); /* sock_put in smc_accept_unlink () */ spin_lock(&par->accept_q_lock); list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q); spin_unlock(&par->accept_q_lock); sk_acceptq_added(parent); } /* remove a socket from the accept queue of its parental listening socket */ static void smc_accept_unlink(struct sock *sk) { struct smc_sock *par = smc_sk(sk)->listen_smc; spin_lock(&par->accept_q_lock); list_del_init(&smc_sk(sk)->accept_q); spin_unlock(&par->accept_q_lock); sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk); sock_put(sk); /* sock_hold in smc_accept_enqueue */ } /* remove a sock from the accept queue to bind it to a new socket created * for a socket accept call from user space */ struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock) { struct smc_sock *isk, *n; struct sock *new_sk; list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) { new_sk = (struct sock *)isk; smc_accept_unlink(new_sk); if (new_sk->sk_state == SMC_CLOSED) { new_sk->sk_prot->unhash(new_sk); if (isk->clcsock) { sock_release(isk->clcsock); isk->clcsock = NULL; } sock_put(new_sk); /* final */ continue; } if (new_sock) { sock_graft(new_sk, new_sock); new_sock->state = SS_CONNECTED; if (isk->use_fallback) { smc_sk(new_sk)->clcsock->file = new_sock->file; isk->clcsock->file->private_data = isk->clcsock; } } return new_sk; } return NULL; } /* clean up for a 
created but never accepted sock */ void smc_close_non_accepted(struct sock *sk) { struct smc_sock *smc = smc_sk(sk); sock_hold(sk); /* sock_put below */ lock_sock(sk); if (!sk->sk_lingertime) /* wait for peer closing */ WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT); __smc_release(smc); release_sock(sk); sock_put(sk); /* sock_hold above */ sock_put(sk); /* final sock_put */ } static int smcr_serv_conf_first_link(struct smc_sock *smc) { struct smc_link *link = smc->conn.lnk; struct smc_llc_qentry *qentry; int rc; /* reg the sndbuf if it was vzalloced*/ if (smc->conn.sndbuf_desc->is_vm) { if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc)) return SMC_CLC_DECL_ERR_REGBUF; } /* reg the rmb */ if (smcr_link_reg_buf(link, smc->conn.rmb_desc)) return SMC_CLC_DECL_ERR_REGBUF; /* send CONFIRM LINK request to client over the RoCE fabric */ rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ); if (rc < 0) return SMC_CLC_DECL_TIMEOUT_CL; /* receive CONFIRM LINK response from client over the RoCE fabric */ qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_CONFIRM_LINK); if (!qentry) { struct smc_clc_msg_decline dclc; rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT); return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc; } smc_llc_save_peer_uid(qentry); rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP); smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl); if (rc) return SMC_CLC_DECL_RMBE_EC; /* confirm_rkey is implicit on 1st contact */ smc->conn.rmb_desc->is_conf_rkey = true; smc_llc_link_active(link); smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE); if (link->lgr->max_links > 1) { down_write(&link->lgr->llc_conf_mutex); /* initial contact - try to establish second link */ smc_llc_srv_add_link(link, NULL); up_write(&link->lgr->llc_conf_mutex); } return 0; } /* listen worker: finish */ static void smc_listen_out(struct smc_sock *new_smc) { struct smc_sock *lsmc = new_smc->listen_smc; struct sock *newsmcsk = &new_smc->sk; if (tcp_sk(new_smc->clcsock->sk)->syn_smc) atomic_dec(&lsmc->queued_smc_hs); if (lsmc->sk.sk_state == SMC_LISTEN) { lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); smc_accept_enqueue(&lsmc->sk, newsmcsk); release_sock(&lsmc->sk); } else { /* no longer listening */ smc_close_non_accepted(newsmcsk); } /* Wake up accept */ lsmc->sk.sk_data_ready(&lsmc->sk); sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */ } /* listen worker: finish in state connected */ static void smc_listen_out_connected(struct smc_sock *new_smc) { struct sock *newsmcsk = &new_smc->sk; if (newsmcsk->sk_state == SMC_INIT) newsmcsk->sk_state = SMC_ACTIVE; smc_listen_out(new_smc); } /* listen worker: finish in error state */ static void smc_listen_out_err(struct smc_sock *new_smc) { struct sock *newsmcsk = &new_smc->sk; struct net *net = sock_net(newsmcsk); this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt); if (newsmcsk->sk_state == SMC_INIT) sock_put(&new_smc->sk); /* passive closing */ newsmcsk->sk_state = SMC_CLOSED; smc_listen_out(new_smc); } /* listen worker: decline and fall back if possible */ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code, int local_first, u8 version) { /* RDMA setup failed, switch back to TCP */ smc_conn_abort(new_smc, local_first); if (reason_code < 0 || smc_switch_to_fallback(new_smc, reason_code)) { /* error, no fallback possible */ smc_listen_out_err(new_smc); return; } if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { if (smc_clc_send_decline(new_smc, reason_code, version) < 0) { 
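			/* decline message could not be sent: treat it as a
			 * fatal handshake error for this child socket
			 */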
smc_listen_out_err(new_smc); return; } } smc_listen_out_connected(new_smc); } /* listen worker: version checking */ static int smc_listen_v2_check(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext; struct smc_clc_v2_extension *pclc_v2_ext; int rc = SMC_CLC_DECL_PEERNOSMC; ini->smc_type_v1 = pclc->hdr.typev1; ini->smc_type_v2 = pclc->hdr.typev2; ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0; ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0; if (pclc->hdr.version > SMC_V1) { if (smcd_indicated(ini->smc_type_v2)) ini->smcd_version |= SMC_V2; if (smcr_indicated(ini->smc_type_v2)) ini->smcr_version |= SMC_V2; } if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) { rc = SMC_CLC_DECL_PEERNOSMC; goto out; } pclc_v2_ext = smc_get_clc_v2_ext(pclc); if (!pclc_v2_ext) { ini->smcd_version &= ~SMC_V2; ini->smcr_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOV2EXT; goto out; } pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext); if (ini->smcd_version & SMC_V2) { if (!smc_ism_is_v2_capable()) { ini->smcd_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOISM2SUPP; } else if (!pclc_smcd_v2_ext) { ini->smcd_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOV2DEXT; } else if (!pclc_v2_ext->hdr.eid_cnt && !pclc_v2_ext->hdr.flag.seid) { ini->smcd_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOUEID; } } if (ini->smcr_version & SMC_V2) { if (!pclc_v2_ext->hdr.eid_cnt) { ini->smcr_version &= ~SMC_V2; rc = SMC_CLC_DECL_NOUEID; } } ini->release_nr = pclc_v2_ext->hdr.flag.release; if (pclc_v2_ext->hdr.flag.release > SMC_RELEASE) ini->release_nr = SMC_RELEASE; out: if (!ini->smcd_version && !ini->smcr_version) return rc; return 0; } /* listen worker: check prefixes */ static int smc_listen_prfx_check(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc) { struct smc_clc_msg_proposal_prefix *pclc_prfx; struct socket *newclcsock = new_smc->clcsock; if (pclc->hdr.typev1 == SMC_TYPE_N) return 0; pclc_prfx = smc_clc_proposal_get_prefix(pclc); if (smc_clc_prfx_match(newclcsock, pclc_prfx)) return SMC_CLC_DECL_DIFFPREFIX; return 0; } /* listen worker: initialize connection and buffers */ static int smc_listen_rdma_init(struct smc_sock *new_smc, struct smc_init_info *ini) { int rc; /* allocate connection / link group */ rc = smc_conn_create(new_smc, ini); if (rc) return rc; /* create send buffer and rmb */ if (smc_buf_create(new_smc, false)) { smc_conn_abort(new_smc, ini->first_contact_local); return SMC_CLC_DECL_MEM; } return 0; } /* listen worker: initialize connection and buffers for SMC-D */ static int smc_listen_ism_init(struct smc_sock *new_smc, struct smc_init_info *ini) { int rc; rc = smc_conn_create(new_smc, ini); if (rc) return rc; /* Create send and receive buffers */ rc = smc_buf_create(new_smc, true); if (rc) { smc_conn_abort(new_smc, ini->first_contact_local); return (rc == -ENOSPC) ? 
SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM; } return 0; } static bool smc_is_already_selected(struct smcd_dev *smcd, struct smc_init_info *ini, int matches) { int i; for (i = 0; i < matches; i++) if (smcd == ini->ism_dev[i]) return true; return false; } /* check for ISM devices matching proposed ISM devices */ static void smc_check_ism_v2_match(struct smc_init_info *ini, u16 proposed_chid, u64 proposed_gid, unsigned int *matches) { struct smcd_dev *smcd; list_for_each_entry(smcd, &smcd_dev_list.list, list) { if (smcd->going_away) continue; if (smc_is_already_selected(smcd, ini, *matches)) continue; if (smc_ism_get_chid(smcd) == proposed_chid && !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) { ini->ism_peer_gid[*matches] = proposed_gid; ini->ism_dev[*matches] = smcd; (*matches)++; break; } } } static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini) { if (!ini->rc) ini->rc = rc; } static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { struct smc_clc_smcd_v2_extension *smcd_v2_ext; struct smc_clc_v2_extension *smc_v2_ext; struct smc_clc_msg_smcd *pclc_smcd; unsigned int matches = 0; u8 smcd_version; u8 *eid = NULL; int i, rc; if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2)) goto not_found; pclc_smcd = smc_get_clc_msg_smcd(pclc); smc_v2_ext = smc_get_clc_v2_ext(pclc); smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext); mutex_lock(&smcd_dev_list.mutex); if (pclc_smcd->ism.chid) /* check for ISM device matching proposed native ISM device */ smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid), ntohll(pclc_smcd->ism.gid), &matches); for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) { /* check for ISM devices matching proposed non-native ISM * devices */ smc_check_ism_v2_match(ini, ntohs(smcd_v2_ext->gidchid[i - 1].chid), ntohll(smcd_v2_ext->gidchid[i - 1].gid), &matches); } mutex_unlock(&smcd_dev_list.mutex); if (!ini->ism_dev[0]) { smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini); goto not_found; } smc_ism_get_system_eid(&eid); if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, smcd_v2_ext->system_eid, eid)) goto not_found; /* separate - outside the smcd_dev_list.lock */ smcd_version = ini->smcd_version; for (i = 0; i < matches; i++) { ini->smcd_version = SMC_V2; ini->is_smcd = true; ini->ism_selected = i; rc = smc_listen_ism_init(new_smc, ini); if (rc) { smc_find_ism_store_rc(rc, ini); /* try next active ISM device */ continue; } return; /* matching and usable V2 ISM device found */ } /* no V2 ISM device could be initialized */ ini->smcd_version = smcd_version; /* restore original value */ ini->negotiated_eid[0] = 0; not_found: ini->smcd_version &= ~SMC_V2; ini->ism_dev[0] = NULL; ini->is_smcd = false; } static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc); int rc = 0; /* check if ISM V1 is available */ if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1)) goto not_found; ini->is_smcd = true; /* prepare ISM check */ ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid); rc = smc_find_ism_device(new_smc, ini); if (rc) goto not_found; ini->ism_selected = 0; rc = smc_listen_ism_init(new_smc, ini); if (!rc) return; /* V1 ISM device found */ not_found: smc_find_ism_store_rc(rc, ini); ini->smcd_version &= ~SMC_V1; ini->ism_dev[0] = NULL; ini->is_smcd = false; } /* listen worker: register buffers */ static int 
smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first) { struct smc_connection *conn = &new_smc->conn; if (!local_first) { /* reg sendbufs if they were vzalloced */ if (conn->sndbuf_desc->is_vm) { if (smcr_lgr_reg_sndbufs(conn->lnk, conn->sndbuf_desc)) return SMC_CLC_DECL_ERR_REGBUF; } if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc)) return SMC_CLC_DECL_ERR_REGBUF; } return 0; } static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { struct smc_clc_v2_extension *smc_v2_ext; u8 smcr_version; int rc; if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2)) goto not_found; smc_v2_ext = smc_get_clc_v2_ext(pclc); if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL)) goto not_found; /* prepare RDMA check */ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE); memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); ini->check_smcrv2 = true; ini->smcrv2.clc_sk = new_smc->clcsock->sk; ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr; ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce); rc = smc_find_rdma_device(new_smc, ini); if (rc) { smc_find_ism_store_rc(rc, ini); goto not_found; } if (!ini->smcrv2.uses_gateway) memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN); smcr_version = ini->smcr_version; ini->smcr_version = SMC_V2; rc = smc_listen_rdma_init(new_smc, ini); if (!rc) { rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); if (rc) smc_conn_abort(new_smc, ini->first_contact_local); } if (!rc) return; ini->smcr_version = smcr_version; smc_find_ism_store_rc(rc, ini); not_found: ini->smcr_version &= ~SMC_V2; ini->smcrv2.ib_dev_v2 = NULL; ini->check_smcrv2 = false; } static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { int rc; if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1)) return SMC_CLC_DECL_NOSMCDEV; /* prepare RDMA check */ memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN); memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE); memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN); rc = smc_find_rdma_device(new_smc, ini); if (rc) { /* no RDMA device found */ return SMC_CLC_DECL_NOSMCDEV; } rc = smc_listen_rdma_init(new_smc, ini); if (rc) return rc; return smc_listen_rdma_reg(new_smc, ini->first_contact_local); } /* determine the local device matching to proposal */ static int smc_listen_find_device(struct smc_sock *new_smc, struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { int prfx_rc; /* check for ISM device matching V2 proposed device */ smc_find_ism_v2_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0; /* check for matching IP prefix and subnet length (V1) */ prfx_rc = smc_listen_prfx_check(new_smc, pclc); if (prfx_rc) smc_find_ism_store_rc(prfx_rc, ini); /* get vlan id from IP device */ if (smc_vlan_by_tcpsk(new_smc->clcsock, ini)) return ini->rc ?: SMC_CLC_DECL_GETVLANERR; /* check for ISM device matching V1 proposed device */ if (!prfx_rc) smc_find_ism_v1_device_serv(new_smc, pclc, ini); if (ini->ism_dev[0]) return 0; if (!smcr_indicated(pclc->hdr.typev1) && !smcr_indicated(pclc->hdr.typev2)) /* skip RDMA and decline */ return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV; /* check if RDMA V2 is available */ smc_find_rdma_v2_device_serv(new_smc, pclc, ini); if (ini->smcrv2.ib_dev_v2) return 0; /* check if RDMA V1 is available */ if (!prfx_rc) { int rc; rc = 
smc_find_rdma_v1_device_serv(new_smc, pclc, ini); smc_find_ism_store_rc(rc, ini); return (!rc) ? 0 : ini->rc; } return SMC_CLC_DECL_NOSMCDEV; } /* listen worker: finish RDMA setup */ static int smc_listen_rdma_finish(struct smc_sock *new_smc, struct smc_clc_msg_accept_confirm *cclc, bool local_first, struct smc_init_info *ini) { struct smc_link *link = new_smc->conn.lnk; int reason_code = 0; if (local_first) smc_link_save_peer_info(link, cclc, ini); if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc)) return SMC_CLC_DECL_ERR_RTOK; if (local_first) { if (smc_ib_ready_link(link)) return SMC_CLC_DECL_ERR_RDYLNK; /* QP confirmation over RoCE fabric */ smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK); reason_code = smcr_serv_conf_first_link(new_smc); smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl); } return reason_code; } /* setup for connection of server */ static void smc_listen_work(struct work_struct *work) { struct smc_sock *new_smc = container_of(work, struct smc_sock, smc_listen_work); struct socket *newclcsock = new_smc->clcsock; struct smc_clc_msg_accept_confirm *cclc; struct smc_clc_msg_proposal_area *buf; struct smc_clc_msg_proposal *pclc; struct smc_init_info *ini = NULL; u8 proposal_version = SMC_V1; u8 accept_version; int rc = 0; if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) return smc_listen_out_err(new_smc); if (new_smc->use_fallback) { smc_listen_out_connected(new_smc); return; } /* check if peer is smc capable */ if (!tcp_sk(newclcsock->sk)->syn_smc) { rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC); if (rc) smc_listen_out_err(new_smc); else smc_listen_out_connected(new_smc); return; } /* do inband token exchange - * wait for and receive SMC Proposal CLC message */ buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) { rc = SMC_CLC_DECL_MEM; goto out_decl; } pclc = (struct smc_clc_msg_proposal *)buf; rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf), SMC_CLC_PROPOSAL, CLC_WAIT_TIME); if (rc) goto out_decl; if (pclc->hdr.version > SMC_V1) proposal_version = SMC_V2; /* IPSec connections opt out of SMC optimizations */ if (using_ipsec(new_smc)) { rc = SMC_CLC_DECL_IPSEC; goto out_decl; } ini = kzalloc(sizeof(*ini), GFP_KERNEL); if (!ini) { rc = SMC_CLC_DECL_MEM; goto out_decl; } /* initial version checking */ rc = smc_listen_v2_check(new_smc, pclc, ini); if (rc) goto out_decl; rc = smc_clc_srv_v2x_features_validate(pclc, ini); if (rc) goto out_decl; mutex_lock(&smc_server_lgr_pending); smc_close_init(new_smc); smc_rx_init(new_smc); smc_tx_init(new_smc); /* determine ISM or RoCE device used for connection */ rc = smc_listen_find_device(new_smc, pclc, ini); if (rc) goto out_unlock; /* send SMC Accept CLC message */ accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version; rc = smc_clc_send_accept(new_smc, ini->first_contact_local, accept_version, ini->negotiated_eid, ini); if (rc) goto out_unlock; /* SMC-D does not need this lock any more */ if (ini->is_smcd) mutex_unlock(&smc_server_lgr_pending); /* receive SMC Confirm CLC message */ memset(buf, 0, sizeof(*buf)); cclc = (struct smc_clc_msg_accept_confirm *)buf; rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf), SMC_CLC_CONFIRM, CLC_WAIT_TIME); if (rc) { if (!ini->is_smcd) goto out_unlock; goto out_decl; } rc = smc_clc_v2x_features_confirm_check(cclc, ini); if (rc) { if (!ini->is_smcd) goto out_unlock; goto out_decl; } /* fce smc release version is needed in smc_listen_rdma_finish, * so save fce info here. 
*/ smc_conn_save_peer_info_fce(new_smc, cclc); /* finish worker */ if (!ini->is_smcd) { rc = smc_listen_rdma_finish(new_smc, cclc, ini->first_contact_local, ini); if (rc) goto out_unlock; mutex_unlock(&smc_server_lgr_pending); } smc_conn_save_peer_info(new_smc, cclc); smc_listen_out_connected(new_smc); SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini); goto out_free; out_unlock: mutex_unlock(&smc_server_lgr_pending); out_decl: smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0, proposal_version); out_free: kfree(ini); kfree(buf); } static void smc_tcp_listen_work(struct work_struct *work) { struct smc_sock *lsmc = container_of(work, struct smc_sock, tcp_listen_work); struct sock *lsk = &lsmc->sk; struct smc_sock *new_smc; int rc = 0; lock_sock(lsk); while (lsk->sk_state == SMC_LISTEN) { rc = smc_clcsock_accept(lsmc, &new_smc); if (rc) /* clcsock accept queue empty or error */ goto out; if (!new_smc) continue; if (tcp_sk(new_smc->clcsock->sk)->syn_smc) atomic_inc(&lsmc->queued_smc_hs); new_smc->listen_smc = lsmc; new_smc->use_fallback = lsmc->use_fallback; new_smc->fallback_rsn = lsmc->fallback_rsn; sock_hold(lsk); /* sock_put in smc_listen_work */ INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); smc_copy_sock_settings_to_smc(new_smc); sock_hold(&new_smc->sk); /* sock_put in passive closing */ if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work)) sock_put(&new_smc->sk); } out: release_sock(lsk); sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */ } static void smc_clcsock_data_ready(struct sock *listen_clcsock) { struct smc_sock *lsmc; read_lock_bh(&listen_clcsock->sk_callback_lock); lsmc = smc_clcsock_user_data(listen_clcsock); if (!lsmc) goto out; lsmc->clcsk_data_ready(listen_clcsock); if (lsmc->sk.sk_state == SMC_LISTEN) { sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */ if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work)) sock_put(&lsmc->sk); } out: read_unlock_bh(&listen_clcsock->sk_callback_lock); } static int smc_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct smc_sock *smc; int rc; smc = smc_sk(sk); lock_sock(sk); rc = -EINVAL; if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) || smc->connect_nonblock || sock->state != SS_UNCONNECTED) goto out; rc = 0; if (sk->sk_state == SMC_LISTEN) { sk->sk_max_ack_backlog = backlog; goto out; } /* some socket options are handled in core, so we could not apply * them to the clc socket -- copy smc socket options to clc socket */ smc_copy_sock_settings_to_clc(smc); if (!smc->use_fallback) tcp_sk(smc->clcsock->sk)->syn_smc = 1; /* save original sk_data_ready function and establish * smc-specific sk_data_ready function */ write_lock_bh(&smc->clcsock->sk->sk_callback_lock); smc->clcsock->sk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready, smc_clcsock_data_ready, &smc->clcsk_data_ready); write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); /* save original ops */ smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops; smc->af_ops = *smc->ori_af_ops; smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock; inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; if (smc->limit_smc_hs) tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; rc = kernel_listen(smc->clcsock, backlog); if (rc) { write_lock_bh(&smc->clcsock->sk->sk_callback_lock); smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, &smc->clcsk_data_ready); smc->clcsock->sk->sk_user_data = NULL; 
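		/* kernel_listen() on the internal TCP socket failed: undo the
		 * sk_data_ready override and drop the back-pointer to the smc
		 * socket before returning the error
		 */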
write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); goto out; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = SMC_LISTEN; out: release_sock(sk); return rc; } static int smc_accept(struct socket *sock, struct socket *new_sock, int flags, bool kern) { struct sock *sk = sock->sk, *nsk; DECLARE_WAITQUEUE(wait, current); struct smc_sock *lsmc; long timeo; int rc = 0; lsmc = smc_sk(sk); sock_hold(sk); /* sock_put below */ lock_sock(sk); if (lsmc->sk.sk_state != SMC_LISTEN) { rc = -EINVAL; release_sock(sk); goto out; } /* Wait for an incoming connection */ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(nsk = smc_accept_dequeue(sk, new_sock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { rc = -EAGAIN; break; } release_sock(sk); timeo = schedule_timeout(timeo); /* wakeup by sk_data_ready in smc_listen_work() */ sched_annotate_sleep(); lock_sock(sk); if (signal_pending(current)) { rc = sock_intr_errno(timeo); break; } } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (!rc) rc = sock_error(nsk); release_sock(sk); if (rc) goto out; if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) { /* wait till data arrives on the socket */ timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept * MSEC_PER_SEC); if (smc_sk(nsk)->use_fallback) { struct sock *clcsk = smc_sk(nsk)->clcsock->sk; lock_sock(clcsk); if (skb_queue_empty(&clcsk->sk_receive_queue)) sk_wait_data(clcsk, &timeo, NULL); release_sock(clcsk); } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) { lock_sock(nsk); smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available); release_sock(nsk); } } out: sock_put(sk); /* sock_hold above */ return rc; } static int smc_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct smc_sock *smc; if (peer && (sock->sk->sk_state != SMC_ACTIVE) && (sock->sk->sk_state != SMC_APPCLOSEWAIT1)) return -ENOTCONN; smc = smc_sk(sock->sk); return smc->clcsock->ops->getname(smc->clcsock, addr, peer); } static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct smc_sock *smc; int rc; smc = smc_sk(sk); lock_sock(sk); /* SMC does not support connect with fastopen */ if (msg->msg_flags & MSG_FASTOPEN) { /* not connected yet, fallback */ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); if (rc) goto out; } else { rc = -EINVAL; goto out; } } else if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_APPCLOSEWAIT1) && (sk->sk_state != SMC_INIT)) { rc = -EPIPE; goto out; } if (smc->use_fallback) { rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); } else { rc = smc_tx_sendmsg(smc, msg, len); SMC_STAT_TX_PAYLOAD(smc, len, rc); } out: release_sock(sk); return rc; } static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; int rc = -ENOTCONN; smc = smc_sk(sk); lock_sock(sk); if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { /* socket was connected before, no more data to read */ rc = 0; goto out; } if ((sk->sk_state == SMC_INIT) || (sk->sk_state == SMC_LISTEN) || (sk->sk_state == SMC_CLOSED)) goto out; if (sk->sk_state == SMC_PEERFINCLOSEWAIT) { rc = 0; goto out; } if (smc->use_fallback) { rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); } else { msg->msg_namelen = 0; rc = smc_rx_recvmsg(smc, msg, NULL, len, flags); SMC_STAT_RX_PAYLOAD(smc, rc, rc); } out: 
release_sock(sk); return rc; } static __poll_t smc_accept_poll(struct sock *parent) { struct smc_sock *isk = smc_sk(parent); __poll_t mask = 0; spin_lock(&isk->accept_q_lock); if (!list_empty(&isk->accept_q)) mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&isk->accept_q_lock); return mask; } static __poll_t smc_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct smc_sock *smc; __poll_t mask = 0; if (!sk) return EPOLLNVAL; smc = smc_sk(sock->sk); if (smc->use_fallback) { /* delegate to CLC child sock */ mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); sk->sk_err = smc->clcsock->sk->sk_err; } else { if (sk->sk_state != SMC_CLOSED) sock_poll_wait(file, sock, wait); if (sk->sk_err) mask |= EPOLLERR; if ((sk->sk_shutdown == SHUTDOWN_MASK) || (sk->sk_state == SMC_CLOSED)) mask |= EPOLLHUP; if (sk->sk_state == SMC_LISTEN) { /* woken up by sk_data_ready in smc_listen_work() */ mask |= smc_accept_poll(sk); } else if (smc->use_fallback) { /* as result of connect_work()*/ mask |= smc->clcsock->ops->poll(file, smc->clcsock, wait); sk->sk_err = smc->clcsock->sk->sk_err; } else { if ((sk->sk_state != SMC_INIT && atomic_read(&smc->conn.sndbuf_space)) || sk->sk_shutdown & SEND_SHUTDOWN) { mask |= EPOLLOUT | EPOLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } if (atomic_read(&smc->conn.bytes_to_rcv)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; if (sk->sk_state == SMC_APPCLOSEWAIT1) mask |= EPOLLIN; if (smc->conn.urg_state == SMC_URG_VALID) mask |= EPOLLPRI; } } return mask; } static int smc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; bool do_shutdown = true; struct smc_sock *smc; int rc = -EINVAL; int old_state; int rc1 = 0; smc = smc_sk(sk); if ((how < SHUT_RD) || (how > SHUT_RDWR)) return rc; lock_sock(sk); if (sock->state == SS_CONNECTING) { if (sk->sk_state == SMC_ACTIVE) sock->state = SS_CONNECTED; else if (sk->sk_state == SMC_PEERCLOSEWAIT1 || sk->sk_state == SMC_PEERCLOSEWAIT2 || sk->sk_state == SMC_APPCLOSEWAIT1 || sk->sk_state == SMC_APPCLOSEWAIT2 || sk->sk_state == SMC_APPFINCLOSEWAIT) sock->state = SS_DISCONNECTING; } rc = -ENOTCONN; if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_PEERCLOSEWAIT1) && (sk->sk_state != SMC_PEERCLOSEWAIT2) && (sk->sk_state != SMC_APPCLOSEWAIT1) && (sk->sk_state != SMC_APPCLOSEWAIT2) && (sk->sk_state != SMC_APPFINCLOSEWAIT)) goto out; if (smc->use_fallback) { rc = kernel_sock_shutdown(smc->clcsock, how); sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; if (sk->sk_shutdown == SHUTDOWN_MASK) { sk->sk_state = SMC_CLOSED; sk->sk_socket->state = SS_UNCONNECTED; sock_put(sk); } goto out; } switch (how) { case SHUT_RDWR: /* shutdown in both directions */ old_state = sk->sk_state; rc = smc_close_active(smc); if (old_state == SMC_ACTIVE && sk->sk_state == SMC_PEERCLOSEWAIT1) do_shutdown = false; break; case SHUT_WR: rc = smc_close_shutdown_write(smc); break; case SHUT_RD: rc = 0; /* nothing more to do because peer is not involved */ break; } if (do_shutdown && smc->clcsock) rc1 = kernel_sock_shutdown(smc->clcsock, how); /* map sock_shutdown_cmd constants to sk_shutdown value range */ sk->sk_shutdown |= how + 1; if (sk->sk_state == SMC_CLOSED) sock->state = SS_UNCONNECTED; else sock->state = SS_DISCONNECTING; out: release_sock(sk); return rc ? 
rc : rc1; } static int __smc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct smc_sock *smc; int val, len; smc = smc_sk(sock->sk); if (get_user(len, optlen)) return -EFAULT; len = min_t(int, len, sizeof(int)); if (len < 0) return -EINVAL; switch (optname) { case SMC_LIMIT_HS: val = smc->limit_smc_hs; break; default: return -EOPNOTSUPP; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int __smc_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct smc_sock *smc; int val, rc; smc = smc_sk(sk); lock_sock(sk); switch (optname) { case SMC_LIMIT_HS: if (optlen < sizeof(int)) { rc = -EINVAL; break; } if (copy_from_sockptr(&val, optval, sizeof(int))) { rc = -EFAULT; break; } smc->limit_smc_hs = !!val; rc = 0; break; default: rc = -EOPNOTSUPP; break; } release_sock(sk); return rc; } static int smc_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct smc_sock *smc; int val, rc; if (level == SOL_TCP && optname == TCP_ULP) return -EOPNOTSUPP; else if (level == SOL_SMC) return __smc_setsockopt(sock, level, optname, optval, optlen); smc = smc_sk(sk); /* generic setsockopts reaching us here always apply to the * CLC socket */ mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { mutex_unlock(&smc->clcsock_release_lock); return -EBADF; } if (unlikely(!smc->clcsock->ops->setsockopt)) rc = -EOPNOTSUPP; else rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, optval, optlen); if (smc->clcsock->sk->sk_err) { sk->sk_err = smc->clcsock->sk->sk_err; sk_error_report(sk); } mutex_unlock(&smc->clcsock_release_lock); if (optlen < sizeof(int)) return -EINVAL; if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; lock_sock(sk); if (rc || smc->use_fallback) goto out; switch (optname) { case TCP_FASTOPEN: case TCP_FASTOPEN_CONNECT: case TCP_FASTOPEN_KEY: case TCP_FASTOPEN_NO_COOKIE: /* option not supported by SMC */ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); } else { rc = -EINVAL; } break; case TCP_NODELAY: if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { if (val) { SMC_STAT_INC(smc, ndly_cnt); smc_tx_pending(&smc->conn); cancel_delayed_work(&smc->conn.tx_work); } } break; case TCP_CORK: if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_CLOSED) { if (!val) { SMC_STAT_INC(smc, cork_cnt); smc_tx_pending(&smc->conn); cancel_delayed_work(&smc->conn.tx_work); } } break; case TCP_DEFER_ACCEPT: smc->sockopt_defer_accept = val; break; default: break; } out: release_sock(sk); return rc; } static int smc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct smc_sock *smc; int rc; if (level == SOL_SMC) return __smc_getsockopt(sock, level, optname, optval, optlen); smc = smc_sk(sock->sk); mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { mutex_unlock(&smc->clcsock_release_lock); return -EBADF; } /* socket options apply to the CLC socket */ if (unlikely(!smc->clcsock->ops->getsockopt)) { mutex_unlock(&smc->clcsock_release_lock); return -EOPNOTSUPP; } rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, optval, optlen); mutex_unlock(&smc->clcsock_release_lock); return rc; } static int smc_ioctl(struct 
socket *sock, unsigned int cmd, unsigned long arg) { union smc_host_cursor cons, urg; struct smc_connection *conn; struct smc_sock *smc; int answ; smc = smc_sk(sock->sk); conn = &smc->conn; lock_sock(&smc->sk); if (smc->use_fallback) { if (!smc->clcsock) { release_sock(&smc->sk); return -EBADF; } answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); release_sock(&smc->sk); return answ; } switch (cmd) { case SIOCINQ: /* same as FIONREAD */ if (smc->sk.sk_state == SMC_LISTEN) { release_sock(&smc->sk); return -EINVAL; } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; else answ = atomic_read(&smc->conn.bytes_to_rcv); break; case SIOCOUTQ: /* output queue size (not send + not acked) */ if (smc->sk.sk_state == SMC_LISTEN) { release_sock(&smc->sk); return -EINVAL; } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; else answ = smc->conn.sndbuf_desc->len - atomic_read(&smc->conn.sndbuf_space); break; case SIOCOUTQNSD: /* output queue size (not send only) */ if (smc->sk.sk_state == SMC_LISTEN) { release_sock(&smc->sk); return -EINVAL; } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) answ = 0; else answ = smc_tx_prepared_sends(&smc->conn); break; case SIOCATMARK: if (smc->sk.sk_state == SMC_LISTEN) { release_sock(&smc->sk); return -EINVAL; } if (smc->sk.sk_state == SMC_INIT || smc->sk.sk_state == SMC_CLOSED) { answ = 0; } else { smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); smc_curs_copy(&urg, &conn->urg_curs, conn); answ = smc_curs_diff(conn->rmb_desc->len, &cons, &urg) == 1; } break; default: release_sock(&smc->sk); return -ENOIOCTLCMD; } release_sock(&smc->sk); return put_user(answ, (int __user *)arg); } /* Map the affected portions of the rmbe into an spd, note the number of bytes * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor * updates till whenever a respective page has been fully processed. * Note that subsequent recv() calls have to wait till all splice() processing * completed. 
*/ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; int rc = -ENOTCONN; smc = smc_sk(sk); lock_sock(sk); if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { /* socket was connected before, no more data to read */ rc = 0; goto out; } if (sk->sk_state == SMC_INIT || sk->sk_state == SMC_LISTEN || sk->sk_state == SMC_CLOSED) goto out; if (sk->sk_state == SMC_PEERFINCLOSEWAIT) { rc = 0; goto out; } if (smc->use_fallback) { rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, pipe, len, flags); } else { if (*ppos) { rc = -ESPIPE; goto out; } if (flags & SPLICE_F_NONBLOCK) flags = MSG_DONTWAIT; else flags = 0; SMC_STAT_INC(smc, splice_cnt); rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags); } out: release_sock(sk); return rc; } /* must look like tcp */ static const struct proto_ops smc_sock_ops = { .family = PF_SMC, .owner = THIS_MODULE, .release = smc_release, .bind = smc_bind, .connect = smc_connect, .socketpair = sock_no_socketpair, .accept = smc_accept, .getname = smc_getname, .poll = smc_poll, .ioctl = smc_ioctl, .listen = smc_listen, .shutdown = smc_shutdown, .setsockopt = smc_setsockopt, .getsockopt = smc_getsockopt, .sendmsg = smc_sendmsg, .recvmsg = smc_recvmsg, .mmap = sock_no_mmap, .splice_read = smc_splice_read, }; static int __smc_create(struct net *net, struct socket *sock, int protocol, int kern, struct socket *clcsock) { int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET; struct smc_sock *smc; struct sock *sk; int rc; rc = -ESOCKTNOSUPPORT; if (sock->type != SOCK_STREAM) goto out; rc = -EPROTONOSUPPORT; if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6) goto out; rc = -ENOBUFS; sock->ops = &smc_sock_ops; sock->state = SS_UNCONNECTED; sk = smc_sock_alloc(net, sock, protocol); if (!sk) goto out; /* create internal TCP socket for CLC handshake and fallback */ smc = smc_sk(sk); smc->use_fallback = false; /* assume rdma capability first */ smc->fallback_rsn = 0; /* default behavior from limit_smc_hs in every net namespace */ smc->limit_smc_hs = net->smc.limit_smc_hs; rc = 0; if (!clcsock) { rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &smc->clcsock); if (rc) { sk_common_release(sk); goto out; } /* smc_clcsock_release() does not wait smc->clcsock->sk's * destruction; its sk_state might not be TCP_CLOSE after * smc->sk is close()d, and TCP timers can be fired later, * which need net ref. 
*/ sk = smc->clcsock->sk; __netns_tracker_free(net, &sk->ns_tracker, false); sk->sk_net_refcnt = 1; get_net_track(net, &sk->ns_tracker, GFP_KERNEL); sock_inuse_add(net, 1); } else { smc->clcsock = clcsock; } out: return rc; } static int smc_create(struct net *net, struct socket *sock, int protocol, int kern) { return __smc_create(net, sock, protocol, kern, NULL); } static const struct net_proto_family smc_sock_family_ops = { .family = PF_SMC, .owner = THIS_MODULE, .create = smc_create, }; static int smc_ulp_init(struct sock *sk) { struct socket *tcp = sk->sk_socket; struct net *net = sock_net(sk); struct socket *smcsock; int protocol, ret; /* only TCP can be replaced */ if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP || (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) return -ESOCKTNOSUPPORT; /* don't handle wq now */ if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list) return -ENOTCONN; if (sk->sk_family == AF_INET) protocol = SMCPROTO_SMC; else protocol = SMCPROTO_SMC6; smcsock = sock_alloc(); if (!smcsock) return -ENFILE; smcsock->type = SOCK_STREAM; __module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */ ret = __smc_create(net, smcsock, protocol, 1, tcp); if (ret) { sock_release(smcsock); /* module_put() which ops won't be NULL */ return ret; } /* replace tcp socket to smc */ smcsock->file = tcp->file; smcsock->file->private_data = smcsock; smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */ smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */ tcp->file = NULL; return ret; } static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk, const gfp_t priority) { struct inet_connection_sock *icsk = inet_csk(newsk); /* don't inherit ulp ops to child when listen */ icsk->icsk_ulp_ops = NULL; } static struct tcp_ulp_ops smc_ulp_ops __read_mostly = { .name = "smc", .owner = THIS_MODULE, .init = smc_ulp_init, .clone = smc_ulp_clone, }; unsigned int smc_net_id; static __net_init int smc_net_init(struct net *net) { int rc; rc = smc_sysctl_net_init(net); if (rc) return rc; return smc_pnet_net_init(net); } static void __net_exit smc_net_exit(struct net *net) { smc_sysctl_net_exit(net); smc_pnet_net_exit(net); } static __net_init int smc_net_stat_init(struct net *net) { return smc_stats_init(net); } static void __net_exit smc_net_stat_exit(struct net *net) { smc_stats_exit(net); } static struct pernet_operations smc_net_ops = { .init = smc_net_init, .exit = smc_net_exit, .id = &smc_net_id, .size = sizeof(struct smc_net), }; static struct pernet_operations smc_net_stat_ops = { .init = smc_net_stat_init, .exit = smc_net_stat_exit, }; static int __init smc_init(void) { int rc; rc = register_pernet_subsys(&smc_net_ops); if (rc) return rc; rc = register_pernet_subsys(&smc_net_stat_ops); if (rc) goto out_pernet_subsys; rc = smc_ism_init(); if (rc) goto out_pernet_subsys_stat; smc_clc_init(); rc = smc_nl_init(); if (rc) goto out_ism; rc = smc_pnet_init(); if (rc) goto out_nl; rc = -ENOMEM; smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0); if (!smc_tcp_ls_wq) goto out_pnet; smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0); if (!smc_hs_wq) goto out_alloc_tcp_ls_wq; smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0); if (!smc_close_wq) goto out_alloc_hs_wq; rc = smc_core_init(); if (rc) { pr_err("%s: smc_core_init fails with %d\n", __func__, rc); goto out_alloc_wqs; } rc = smc_llc_init(); if (rc) { pr_err("%s: smc_llc_init fails with %d\n", __func__, rc); goto out_core; } rc = 
smc_cdc_init(); if (rc) { pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc); goto out_core; } rc = proto_register(&smc_proto, 1); if (rc) { pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc); goto out_core; } rc = proto_register(&smc_proto6, 1); if (rc) { pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc); goto out_proto; } rc = sock_register(&smc_sock_family_ops); if (rc) { pr_err("%s: sock_register fails with %d\n", __func__, rc); goto out_proto6; } INIT_HLIST_HEAD(&smc_v4_hashinfo.ht); INIT_HLIST_HEAD(&smc_v6_hashinfo.ht); rc = smc_ib_register_client(); if (rc) { pr_err("%s: ib_register fails with %d\n", __func__, rc); goto out_sock; } rc = tcp_register_ulp(&smc_ulp_ops); if (rc) { pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc); goto out_ib; } static_branch_enable(&tcp_have_smc); return 0; out_ib: smc_ib_unregister_client(); out_sock: sock_unregister(PF_SMC); out_proto6: proto_unregister(&smc_proto6); out_proto: proto_unregister(&smc_proto); out_core: smc_core_exit(); out_alloc_wqs: destroy_workqueue(smc_close_wq); out_alloc_hs_wq: destroy_workqueue(smc_hs_wq); out_alloc_tcp_ls_wq: destroy_workqueue(smc_tcp_ls_wq); out_pnet: smc_pnet_exit(); out_nl: smc_nl_exit(); out_ism: smc_clc_exit(); smc_ism_exit(); out_pernet_subsys_stat: unregister_pernet_subsys(&smc_net_stat_ops); out_pernet_subsys: unregister_pernet_subsys(&smc_net_ops); return rc; } static void __exit smc_exit(void) { static_branch_disable(&tcp_have_smc); tcp_unregister_ulp(&smc_ulp_ops); sock_unregister(PF_SMC); smc_core_exit(); smc_ib_unregister_client(); smc_ism_exit(); destroy_workqueue(smc_close_wq); destroy_workqueue(smc_tcp_ls_wq); destroy_workqueue(smc_hs_wq); proto_unregister(&smc_proto6); proto_unregister(&smc_proto); smc_pnet_exit(); smc_nl_exit(); smc_clc_exit(); unregister_pernet_subsys(&smc_net_stat_ops); unregister_pernet_subsys(&smc_net_ops); rcu_barrier(); } module_init(smc_init); module_exit(smc_exit); MODULE_AUTHOR("Ursula Braun <[email protected]>"); MODULE_DESCRIPTION("smc socket address family"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_SMC); MODULE_ALIAS_TCP_ULP("smc"); MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);
linux-master
net/smc/af_smc.c
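A minimal userspace sketch of the two entry points af_smc.c wires up above: a native AF_SMC socket handled by smc_create(), and a plain, unconnected TCP socket upgraded through the "smc" ULP registered with tcp_register_ulp(&smc_ulp_ops). The guarded #defines are only fallbacks for older libc headers; the values mirror the uapi constants, and the sketch assumes a kernel built with CONFIG_SMC.

/* cc -o smc_open smc_open.c */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43          /* PF_SMC, see MODULE_ALIAS_NETPROTO(PF_SMC) */
#endif
#ifndef SMCPROTO_SMC
#define SMCPROTO_SMC 0     /* SMC over IPv4; SMCPROTO_SMC6 (1) for IPv6 */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31
#endif

int main(void)
{
    /* path 1: native AF_SMC socket, served by smc_create()/__smc_create() */
    int smc_fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);

    if (smc_fd < 0)
        perror("socket(AF_SMC)");   /* fails when CONFIG_SMC is absent */

    /* path 2: unconnected TCP socket converted in place by smc_ulp_init() */
    int tcp_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

    if (tcp_fd >= 0 &&
        setsockopt(tcp_fd, IPPROTO_TCP, TCP_ULP, "smc", sizeof("smc")))
        perror("setsockopt(TCP_ULP, \"smc\")");

    if (smc_fd >= 0)
        close(smc_fd);
    if (tcp_fd >= 0)
        close(tcp_fd);
    return 0;
}

Either path ends up in __smc_create(); the ULP path is what lets unmodified TCP applications be switched to SMC at socket setup time.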
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Generic netlink support functions to interact with SMC module * * Copyright IBM Corp. 2020 * * Author(s): Guvenc Gulce <[email protected]> */ #include <linux/module.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <linux/if.h> #include <linux/smc.h> #include "smc_core.h" #include "smc_ism.h" #include "smc_ib.h" #include "smc_clc.h" #include "smc_stats.h" #include "smc_netlink.h" const struct nla_policy smc_gen_ueid_policy[SMC_NLA_EID_TABLE_MAX + 1] = { [SMC_NLA_EID_TABLE_UNSPEC] = { .type = NLA_UNSPEC }, [SMC_NLA_EID_TABLE_ENTRY] = { .type = NLA_STRING, .len = SMC_MAX_EID_LEN, }, }; #define SMC_CMD_MAX_ATTR 1 /* SMC_GENL generic netlink operation definition */ static const struct genl_ops smc_gen_nl_ops[] = { { .cmd = SMC_NETLINK_GET_SYS_INFO, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_get_sys_info, }, { .cmd = SMC_NETLINK_GET_LGR_SMCR, /* can be retrieved by unprivileged users */ .dumpit = smcr_nl_get_lgr, }, { .cmd = SMC_NETLINK_GET_LINK_SMCR, /* can be retrieved by unprivileged users */ .dumpit = smcr_nl_get_link, }, { .cmd = SMC_NETLINK_GET_LGR_SMCD, /* can be retrieved by unprivileged users */ .dumpit = smcd_nl_get_lgr, }, { .cmd = SMC_NETLINK_GET_DEV_SMCD, /* can be retrieved by unprivileged users */ .dumpit = smcd_nl_get_device, }, { .cmd = SMC_NETLINK_GET_DEV_SMCR, /* can be retrieved by unprivileged users */ .dumpit = smcr_nl_get_device, }, { .cmd = SMC_NETLINK_GET_STATS, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_get_stats, }, { .cmd = SMC_NETLINK_GET_FBACK_STATS, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_get_fback_stats, }, { .cmd = SMC_NETLINK_DUMP_UEID, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_dump_ueid, }, { .cmd = SMC_NETLINK_ADD_UEID, .flags = GENL_ADMIN_PERM, .doit = smc_nl_add_ueid, .policy = smc_gen_ueid_policy, }, { .cmd = SMC_NETLINK_REMOVE_UEID, .flags = GENL_ADMIN_PERM, .doit = smc_nl_remove_ueid, .policy = smc_gen_ueid_policy, }, { .cmd = SMC_NETLINK_FLUSH_UEID, .flags = GENL_ADMIN_PERM, .doit = smc_nl_flush_ueid, }, { .cmd = SMC_NETLINK_DUMP_SEID, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_dump_seid, }, { .cmd = SMC_NETLINK_ENABLE_SEID, .flags = GENL_ADMIN_PERM, .doit = smc_nl_enable_seid, }, { .cmd = SMC_NETLINK_DISABLE_SEID, .flags = GENL_ADMIN_PERM, .doit = smc_nl_disable_seid, }, { .cmd = SMC_NETLINK_DUMP_HS_LIMITATION, /* can be retrieved by unprivileged users */ .dumpit = smc_nl_dump_hs_limitation, }, { .cmd = SMC_NETLINK_ENABLE_HS_LIMITATION, .flags = GENL_ADMIN_PERM, .doit = smc_nl_enable_hs_limitation, }, { .cmd = SMC_NETLINK_DISABLE_HS_LIMITATION, .flags = GENL_ADMIN_PERM, .doit = smc_nl_disable_hs_limitation, }, }; static const struct nla_policy smc_gen_nl_policy[2] = { [SMC_CMD_MAX_ATTR] = { .type = NLA_REJECT, }, }; /* SMC_GENL family definition */ struct genl_family smc_gen_nl_family __ro_after_init = { .hdrsize = 0, .name = SMC_GENL_FAMILY_NAME, .version = SMC_GENL_FAMILY_VERSION, .maxattr = SMC_CMD_MAX_ATTR, .policy = smc_gen_nl_policy, .netnsok = true, .module = THIS_MODULE, .ops = smc_gen_nl_ops, .n_ops = ARRAY_SIZE(smc_gen_nl_ops), .resv_start_op = SMC_NETLINK_DISABLE_HS_LIMITATION + 1, }; int __init smc_nl_init(void) { return genl_register_family(&smc_gen_nl_family); } void smc_nl_exit(void) { genl_unregister_family(&smc_gen_nl_family); }
linux-master
net/smc/smc_netlink.c
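Because smc_gen_nl_ops above exposes its dump commands to unprivileged users, a userspace tool can drive them over generic netlink. The following is a hedged sketch using libnl-genl-3: it resolves the family by SMC_GENL_FAMILY_NAME (the same macro the <linux/smc.h> uapi header provides) and requests SMC_NETLINK_GET_SYS_INFO; error handling is trimmed and nl_msg_dump() stands in for real SMC_NLA_SYS_* attribute parsing.

/* cc smc_sysinfo.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <stdio.h>
#include <linux/smc.h>              /* SMC_GENL_FAMILY_NAME, SMC_NETLINK_* */
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int show(struct nl_msg *msg, void *arg)
{
    (void)arg;
    nl_msg_dump(msg, stdout);       /* a real tool would parse SMC_NLA_SYS_* */
    return NL_OK;
}

int main(void)
{
    struct nl_sock *sk = nl_socket_alloc();
    struct nl_msg *msg;
    int family;

    if (!sk || genl_connect(sk))
        return 1;
    family = genl_ctrl_resolve(sk, SMC_GENL_FAMILY_NAME);
    if (family < 0)
        return 1;                   /* SMC module not loaded */

    nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, show, NULL);
    msg = nlmsg_alloc();
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
                SMC_NETLINK_GET_SYS_INFO, SMC_GENL_FAMILY_VERSION);
    nl_send_auto(sk, msg);
    nl_recvmsgs_default(sk);

    nlmsg_free(msg);
    nl_socket_free(sk);
    return 0;
}

The same pattern works for the other dumpit commands (SMC_NETLINK_GET_STATS, SMC_NETLINK_GET_LGR_SMCR, ...); only the command constant and the attribute parsing differ.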
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * CLC (connection layer control) handshake over initial TCP socket to * prepare for RDMA traffic * * Copyright IBM Corp. 2016, 2018 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/in.h> #include <linux/inetdevice.h> #include <linux/if_ether.h> #include <linux/sched/signal.h> #include <linux/utsname.h> #include <linux/ctype.h> #include <net/addrconf.h> #include <net/sock.h> #include <net/tcp.h> #include "smc.h" #include "smc_core.h" #include "smc_clc.h" #include "smc_ib.h" #include "smc_ism.h" #include "smc_netlink.h" #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 #define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78 #define SMCR_CLC_ACCEPT_CONFIRM_LEN_V2 108 #define SMC_CLC_RECV_BUF_LEN 100 /* eye catcher "SMCR" EBCDIC for CLC messages */ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; /* eye catcher "SMCD" EBCDIC for CLC messages */ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'}; static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN]; struct smc_clc_eid_table { rwlock_t lock; struct list_head list; u8 ueid_cnt; u8 seid_enabled; }; static struct smc_clc_eid_table smc_clc_eid_table; struct smc_clc_eid_entry { struct list_head list; u8 eid[SMC_MAX_EID_LEN]; }; /* The size of a user EID is 32 characters. * Valid characters should be (single-byte character set) A-Z, 0-9, '.' and '-'. * Blanks should only be used to pad to the expected size. * First character must be alphanumeric. */ static bool smc_clc_ueid_valid(char *ueid) { char *end = ueid + SMC_MAX_EID_LEN; while (--end >= ueid && isspace(*end)) ; if (end < ueid) return false; if (!isalnum(*ueid) || islower(*ueid)) return false; while (ueid <= end) { if ((!isalnum(*ueid) || islower(*ueid)) && *ueid != '.' 
&& *ueid != '-') return false; ueid++; } return true; } static int smc_clc_ueid_add(char *ueid) { struct smc_clc_eid_entry *new_ueid, *tmp_ueid; int rc; if (!smc_clc_ueid_valid(ueid)) return -EINVAL; /* add a new ueid entry to the ueid table if there isn't one */ new_ueid = kzalloc(sizeof(*new_ueid), GFP_KERNEL); if (!new_ueid) return -ENOMEM; memcpy(new_ueid->eid, ueid, SMC_MAX_EID_LEN); write_lock(&smc_clc_eid_table.lock); if (smc_clc_eid_table.ueid_cnt >= SMC_MAX_UEID) { rc = -ERANGE; goto err_out; } list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) { if (!memcmp(tmp_ueid->eid, ueid, SMC_MAX_EID_LEN)) { rc = -EEXIST; goto err_out; } } list_add_tail(&new_ueid->list, &smc_clc_eid_table.list); smc_clc_eid_table.ueid_cnt++; write_unlock(&smc_clc_eid_table.lock); return 0; err_out: write_unlock(&smc_clc_eid_table.lock); kfree(new_ueid); return rc; } int smc_clc_ueid_count(void) { int count; read_lock(&smc_clc_eid_table.lock); count = smc_clc_eid_table.ueid_cnt; read_unlock(&smc_clc_eid_table.lock); return count; } int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info) { struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY]; char *ueid; if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1) return -EINVAL; ueid = (char *)nla_data(nla_ueid); return smc_clc_ueid_add(ueid); } /* remove one or all ueid entries from the table */ static int smc_clc_ueid_remove(char *ueid) { struct smc_clc_eid_entry *lst_ueid, *tmp_ueid; int rc = -ENOENT; /* remove table entry */ write_lock(&smc_clc_eid_table.lock); list_for_each_entry_safe(lst_ueid, tmp_ueid, &smc_clc_eid_table.list, list) { if (!ueid || !memcmp(lst_ueid->eid, ueid, SMC_MAX_EID_LEN)) { list_del(&lst_ueid->list); smc_clc_eid_table.ueid_cnt--; kfree(lst_ueid); rc = 0; } } if (!rc && !smc_clc_eid_table.ueid_cnt) { smc_clc_eid_table.seid_enabled = 1; rc = -EAGAIN; /* indicate success and enabling of seid */ } write_unlock(&smc_clc_eid_table.lock); return rc; } int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info) { struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY]; char *ueid; if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1) return -EINVAL; ueid = (char *)nla_data(nla_ueid); return smc_clc_ueid_remove(ueid); } int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info) { smc_clc_ueid_remove(NULL); return 0; } static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq, u32 flags, char *ueid) { char ueid_str[SMC_MAX_EID_LEN + 1]; void *hdr; hdr = genlmsg_put(skb, portid, seq, &smc_gen_nl_family, flags, SMC_NETLINK_DUMP_UEID); if (!hdr) return -ENOMEM; memcpy(ueid_str, ueid, SMC_MAX_EID_LEN); ueid_str[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) { genlmsg_cancel(skb, hdr); return -EMSGSIZE; } genlmsg_end(skb, hdr); return 0; } static int _smc_nl_ueid_dump(struct sk_buff *skb, u32 portid, u32 seq, int start_idx) { struct smc_clc_eid_entry *lst_ueid; int idx = 0; read_lock(&smc_clc_eid_table.lock); list_for_each_entry(lst_ueid, &smc_clc_eid_table.list, list) { if (idx++ < start_idx) continue; if (smc_nl_ueid_dumpinfo(skb, portid, seq, NLM_F_MULTI, lst_ueid->eid)) { --idx; break; } } read_unlock(&smc_clc_eid_table.lock); return idx; } int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); int idx; idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb_ctx->pos[0]); cb_ctx->pos[0] = idx; return skb->len; } int smc_nl_dump_seid(struct 
sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); char seid_str[SMC_MAX_EID_LEN + 1]; u8 seid_enabled; void *hdr; u8 *seid; if (cb_ctx->pos[0]) return skb->len; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_DUMP_SEID); if (!hdr) return -ENOMEM; if (!smc_ism_is_v2_capable()) goto end; smc_ism_get_system_eid(&seid); memcpy(seid_str, seid, SMC_MAX_EID_LEN); seid_str[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str)) goto err; read_lock(&smc_clc_eid_table.lock); seid_enabled = smc_clc_eid_table.seid_enabled; read_unlock(&smc_clc_eid_table.lock); if (nla_put_u8(skb, SMC_NLA_SEID_ENABLED, seid_enabled)) goto err; end: genlmsg_end(skb, hdr); cb_ctx->pos[0]++; return skb->len; err: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info) { write_lock(&smc_clc_eid_table.lock); smc_clc_eid_table.seid_enabled = 1; write_unlock(&smc_clc_eid_table.lock); return 0; } int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info) { int rc = 0; write_lock(&smc_clc_eid_table.lock); if (!smc_clc_eid_table.ueid_cnt) rc = -ENOENT; else smc_clc_eid_table.seid_enabled = 0; write_unlock(&smc_clc_eid_table.lock); return rc; } static bool _smc_clc_match_ueid(u8 *peer_ueid) { struct smc_clc_eid_entry *tmp_ueid; list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) { if (!memcmp(tmp_ueid->eid, peer_ueid, SMC_MAX_EID_LEN)) return true; } return false; } bool smc_clc_match_eid(u8 *negotiated_eid, struct smc_clc_v2_extension *smc_v2_ext, u8 *peer_eid, u8 *local_eid) { bool match = false; int i; negotiated_eid[0] = 0; read_lock(&smc_clc_eid_table.lock); if (peer_eid && local_eid && smc_clc_eid_table.seid_enabled && smc_v2_ext->hdr.flag.seid && !memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) { memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN); match = true; goto out; } for (i = 0; i < smc_v2_ext->hdr.eid_cnt; i++) { if (_smc_clc_match_ueid(smc_v2_ext->user_eids[i])) { memcpy(negotiated_eid, smc_v2_ext->user_eids[i], SMC_MAX_EID_LEN); match = true; goto out; } } out: read_unlock(&smc_clc_eid_table.lock); return match; } /* check arriving CLC proposal */ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc) { struct smc_clc_msg_proposal_prefix *pclc_prfx; struct smc_clc_smcd_v2_extension *smcd_v2_ext; struct smc_clc_msg_hdr *hdr = &pclc->hdr; struct smc_clc_v2_extension *v2_ext; v2_ext = smc_get_clc_v2_ext(pclc); pclc_prfx = smc_clc_proposal_get_prefix(pclc); if (hdr->version == SMC_V1) { if (hdr->typev1 == SMC_TYPE_N) return false; if (ntohs(hdr->length) != sizeof(*pclc) + ntohs(pclc->iparea_offset) + sizeof(*pclc_prfx) + pclc_prfx->ipv6_prefixes_cnt * sizeof(struct smc_clc_ipv6_prefix) + sizeof(struct smc_clc_msg_trail)) return false; } else { if (ntohs(hdr->length) != sizeof(*pclc) + sizeof(struct smc_clc_msg_smcd) + (hdr->typev1 != SMC_TYPE_N ? sizeof(*pclc_prfx) + pclc_prfx->ipv6_prefixes_cnt * sizeof(struct smc_clc_ipv6_prefix) : 0) + (hdr->typev2 != SMC_TYPE_N ? sizeof(*v2_ext) + v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN : 0) + (smcd_indicated(hdr->typev2) ? 
sizeof(*smcd_v2_ext) + v2_ext->hdr.ism_gid_cnt * sizeof(struct smc_clc_smcd_gid_chid) : 0) + sizeof(struct smc_clc_msg_trail)) return false; } return true; } /* check arriving CLC accept or confirm */ static bool smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2) { struct smc_clc_msg_hdr *hdr = &clc_v2->hdr; if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D) return false; if (hdr->version == SMC_V1) { if ((hdr->typev1 == SMC_TYPE_R && ntohs(hdr->length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) || (hdr->typev1 == SMC_TYPE_D && ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN)) return false; } else { if (hdr->typev1 == SMC_TYPE_D && ntohs(hdr->length) < SMCD_CLC_ACCEPT_CONFIRM_LEN_V2) return false; if (hdr->typev1 == SMC_TYPE_R && ntohs(hdr->length) < SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) return false; } return true; } /* check arriving CLC decline */ static bool smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc) { struct smc_clc_msg_hdr *hdr = &dclc->hdr; if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D) return false; if (hdr->version == SMC_V1) { if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline)) return false; } else { if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline_v2)) return false; } return true; } static int smc_clc_fill_fce(struct smc_clc_first_contact_ext_v2x *fce, struct smc_init_info *ini) { int ret = sizeof(*fce); memset(fce, 0, sizeof(*fce)); fce->fce_v2_base.os_type = SMC_CLC_OS_LINUX; fce->fce_v2_base.release = ini->release_nr; memcpy(fce->fce_v2_base.hostname, smc_hostname, sizeof(smc_hostname)); if (ini->is_smcd && ini->release_nr < SMC_RELEASE_1) { ret = sizeof(struct smc_clc_first_contact_ext); goto out; } if (ini->release_nr >= SMC_RELEASE_1) { if (!ini->is_smcd) { fce->max_conns = ini->max_conns; fce->max_links = ini->max_links; } } out: return ret; } /* check if received message has a correct header length and contains valid * heading and trailing eyecatchers */ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl) { struct smc_clc_msg_accept_confirm_v2 *clc_v2; struct smc_clc_msg_proposal *pclc; struct smc_clc_msg_decline *dclc; struct smc_clc_msg_trail *trl; if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; switch (clcm->type) { case SMC_CLC_PROPOSAL: pclc = (struct smc_clc_msg_proposal *)clcm; if (!smc_clc_msg_prop_valid(pclc)) return false; trl = (struct smc_clc_msg_trail *) ((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl)); break; case SMC_CLC_ACCEPT: case SMC_CLC_CONFIRM: clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)clcm; if (!smc_clc_msg_acc_conf_valid(clc_v2)) return false; trl = (struct smc_clc_msg_trail *) ((u8 *)clc_v2 + ntohs(clc_v2->hdr.length) - sizeof(*trl)); break; case SMC_CLC_DECLINE: dclc = (struct smc_clc_msg_decline *)clcm; if (!smc_clc_msg_decl_valid(dclc)) return false; check_trl = false; break; default: return false; } if (check_trl && memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; return true; } /* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */ static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4, struct smc_clc_msg_proposal_prefix *prop) { struct in_device *in_dev = __in_dev_get_rcu(dst->dev); const struct in_ifaddr *ifa; if (!in_dev) return -ENODEV; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (!inet_ifa_match(ipv4, 
ifa)) continue; prop->prefix_len = inet_mask_len(ifa->ifa_mask); prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask; /* prop->ipv6_prefixes_cnt = 0; already done by memset before */ return 0; } return -ENOENT; } /* fill CLC proposal msg with ipv6 prefixes from device */ static int smc_clc_prfx_set6_rcu(struct dst_entry *dst, struct smc_clc_msg_proposal_prefix *prop, struct smc_clc_ipv6_prefix *ipv6_prfx) { #if IS_ENABLED(CONFIG_IPV6) struct inet6_dev *in6_dev = __in6_dev_get(dst->dev); struct inet6_ifaddr *ifa; int cnt = 0; if (!in6_dev) return -ENODEV; /* use a maximum of 8 IPv6 prefixes from device */ list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL) continue; ipv6_addr_prefix(&ipv6_prfx[cnt].prefix, &ifa->addr, ifa->prefix_len); ipv6_prfx[cnt].prefix_len = ifa->prefix_len; cnt++; if (cnt == SMC_CLC_MAX_V6_PREFIX) break; } prop->ipv6_prefixes_cnt = cnt; if (cnt) return 0; #endif return -ENOENT; } /* retrieve and set prefixes in CLC proposal msg */ static int smc_clc_prfx_set(struct socket *clcsock, struct smc_clc_msg_proposal_prefix *prop, struct smc_clc_ipv6_prefix *ipv6_prfx) { struct dst_entry *dst = sk_dst_get(clcsock->sk); struct sockaddr_storage addrs; struct sockaddr_in6 *addr6; struct sockaddr_in *addr; int rc = -ENOENT; if (!dst) { rc = -ENOTCONN; goto out; } if (!dst->dev) { rc = -ENODEV; goto out_rel; } /* get address to which the internal TCP socket is bound */ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0) goto out_rel; /* analyze IP specific data of net_device belonging to TCP socket */ addr6 = (struct sockaddr_in6 *)&addrs; rcu_read_lock(); if (addrs.ss_family == PF_INET) { /* IPv4 */ addr = (struct sockaddr_in *)&addrs; rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop); } else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) { /* mapped IPv4 address - peer is IPv4 only */ rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3], prop); } else { /* IPv6 */ rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx); } rcu_read_unlock(); out_rel: dst_release(dst); out: return rc; } /* match ipv4 addrs of dev against addr in CLC proposal */ static int smc_clc_prfx_match4_rcu(struct net_device *dev, struct smc_clc_msg_proposal_prefix *prop) { struct in_device *in_dev = __in_dev_get_rcu(dev); const struct in_ifaddr *ifa; if (!in_dev) return -ENODEV; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) && inet_ifa_match(prop->outgoing_subnet, ifa)) return 0; } return -ENOENT; } /* match ipv6 addrs of dev against addrs in CLC proposal */ static int smc_clc_prfx_match6_rcu(struct net_device *dev, struct smc_clc_msg_proposal_prefix *prop) { #if IS_ENABLED(CONFIG_IPV6) struct inet6_dev *in6_dev = __in6_dev_get(dev); struct smc_clc_ipv6_prefix *ipv6_prfx; struct inet6_ifaddr *ifa; int i, max; if (!in6_dev) return -ENODEV; /* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */ ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop)); max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX); list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL) continue; for (i = 0; i < max; i++) { if (ifa->prefix_len == ipv6_prfx[i].prefix_len && ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix, ifa->prefix_len)) return 0; } } #endif return -ENOENT; } /* check if proposed prefixes match one of our device prefixes */ int smc_clc_prfx_match(struct socket *clcsock, struct 
smc_clc_msg_proposal_prefix *prop) { struct dst_entry *dst = sk_dst_get(clcsock->sk); int rc; if (!dst) { rc = -ENOTCONN; goto out; } if (!dst->dev) { rc = -ENODEV; goto out_rel; } rcu_read_lock(); if (!prop->ipv6_prefixes_cnt) rc = smc_clc_prfx_match4_rcu(dst->dev, prop); else rc = smc_clc_prfx_match6_rcu(dst->dev, prop); rcu_read_unlock(); out_rel: dst_release(dst); out: return rc; } /* Wait for data on the tcp-socket, analyze received data * Returns: * 0 if success and it was not a decline that we received. * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send. * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise. */ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, u8 expected_type, unsigned long timeout) { long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo; struct sock *clc_sk = smc->clcsock->sk; struct smc_clc_msg_hdr *clcm = buf; struct msghdr msg = {NULL, 0}; int reason_code = 0; struct kvec vec = {buf, buflen}; int len, datlen, recvlen; bool check_trl = true; int krflags; /* peek the first few bytes to determine length of data to receive * so we don't consume any subsequent CLC message or payload data * in the TCP byte stream */ /* * Caller must make sure that buflen is no less than * sizeof(struct smc_clc_msg_hdr) */ krflags = MSG_PEEK | MSG_WAITALL; clc_sk->sk_rcvtimeo = timeout; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, sizeof(struct smc_clc_msg_hdr)); len = sock_recvmsg(smc->clcsock, &msg, krflags); if (signal_pending(current)) { reason_code = -EINTR; clc_sk->sk_err = EINTR; smc->sk.sk_err = EINTR; goto out; } if (clc_sk->sk_err) { reason_code = -clc_sk->sk_err; if (clc_sk->sk_err == EAGAIN && expected_type == SMC_CLC_DECLINE) clc_sk->sk_err = 0; /* reset for fallback usage */ else smc->sk.sk_err = clc_sk->sk_err; goto out; } if (!len) { /* peer has performed orderly shutdown */ smc->sk.sk_err = ECONNRESET; reason_code = -ECONNRESET; goto out; } if (len < 0) { if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE) smc->sk.sk_err = -len; reason_code = len; goto out; } datlen = ntohs(clcm->length); if ((len < sizeof(struct smc_clc_msg_hdr)) || (clcm->version < SMC_V1) || ((clcm->type != SMC_CLC_DECLINE) && (clcm->type != expected_type))) { smc->sk.sk_err = EPROTO; reason_code = -EPROTO; goto out; } /* receive the complete CLC message */ memset(&msg, 0, sizeof(struct msghdr)); if (datlen > buflen) { check_trl = false; recvlen = buflen; } else { recvlen = datlen; } iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); krflags = MSG_WAITALL; len = sock_recvmsg(smc->clcsock, &msg, krflags); if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) { smc->sk.sk_err = EPROTO; reason_code = -EPROTO; goto out; } datlen -= len; while (datlen) { u8 tmp[SMC_CLC_RECV_BUF_LEN]; vec.iov_base = &tmp; vec.iov_len = SMC_CLC_RECV_BUF_LEN; /* receive remaining proposal message */ recvlen = datlen > SMC_CLC_RECV_BUF_LEN ? 
SMC_CLC_RECV_BUF_LEN : datlen; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); len = sock_recvmsg(smc->clcsock, &msg, krflags); datlen -= len; } if (clcm->type == SMC_CLC_DECLINE) { struct smc_clc_msg_decline *dclc; dclc = (struct smc_clc_msg_decline *)clcm; reason_code = SMC_CLC_DECL_PEERDECL; smc->peer_diagnosis = ntohl(dclc->peer_diagnosis); if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 & SMC_FIRST_CONTACT_MASK) { smc->conn.lgr->sync_err = 1; smc_lgr_terminate_sched(smc->conn.lgr); } } out: clc_sk->sk_rcvtimeo = rcvtimeo; return reason_code; } /* send CLC DECLINE message across internal TCP socket */ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version) { struct smc_clc_msg_decline *dclc_v1; struct smc_clc_msg_decline_v2 dclc; struct msghdr msg; int len, send_len; struct kvec vec; dclc_v1 = (struct smc_clc_msg_decline *)&dclc; memset(&dclc, 0, sizeof(dclc)); memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); dclc.hdr.type = SMC_CLC_DECLINE; dclc.hdr.version = version; dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX; dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? SMC_FIRST_CONTACT_MASK : 0; if ((!smc_conn_lgr_valid(&smc->conn) || !smc->conn.lgr->is_smcd) && smc_ib_is_valid_local_systemid()) memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid)); dclc.peer_diagnosis = htonl(peer_diag_info); if (version == SMC_V1) { memcpy(dclc_v1->trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); send_len = sizeof(*dclc_v1); } else { memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); send_len = sizeof(dclc); } dclc.hdr.length = htons(send_len); memset(&msg, 0, sizeof(msg)); vec.iov_base = &dclc; vec.iov_len = send_len; len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, send_len); if (len < 0 || len < send_len) len = -EPROTO; return len > 0 ? 
0 : len; } /* send CLC PROPOSAL message across internal TCP socket */ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini) { struct smc_clc_smcd_v2_extension *smcd_v2_ext; struct smc_clc_msg_proposal_prefix *pclc_prfx; struct smc_clc_msg_proposal *pclc_base; struct smc_clc_smcd_gid_chid *gidchids; struct smc_clc_msg_proposal_area *pclc; struct smc_clc_ipv6_prefix *ipv6_prfx; struct smc_clc_v2_extension *v2_ext; struct smc_clc_msg_smcd *pclc_smcd; struct smc_clc_msg_trail *trl; struct smcd_dev *smcd; int len, i, plen, rc; int reason_code = 0; struct kvec vec[8]; struct msghdr msg; pclc = kzalloc(sizeof(*pclc), GFP_KERNEL); if (!pclc) return -ENOMEM; pclc_base = &pclc->pclc_base; pclc_smcd = &pclc->pclc_smcd; pclc_prfx = &pclc->pclc_prfx; ipv6_prfx = pclc->pclc_prfx_ipv6; v2_ext = &pclc->pclc_v2_ext; smcd_v2_ext = &pclc->pclc_smcd_v2_ext; gidchids = pclc->pclc_gidchids; trl = &pclc->pclc_trl; pclc_base->hdr.version = SMC_V2; pclc_base->hdr.typev1 = ini->smc_type_v1; pclc_base->hdr.typev2 = ini->smc_type_v2; plen = sizeof(*pclc_base) + sizeof(*pclc_smcd) + sizeof(*trl); /* retrieve ip prefixes for CLC proposal msg */ if (ini->smc_type_v1 != SMC_TYPE_N) { rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx); if (rc) { if (ini->smc_type_v2 == SMC_TYPE_N) { kfree(pclc); return SMC_CLC_DECL_CNFERR; } pclc_base->hdr.typev1 = SMC_TYPE_N; } else { pclc_base->iparea_offset = htons(sizeof(*pclc_smcd)); plen += sizeof(*pclc_prfx) + pclc_prfx->ipv6_prefixes_cnt * sizeof(ipv6_prfx[0]); } } /* build SMC Proposal CLC message */ memcpy(pclc_base->hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); pclc_base->hdr.type = SMC_CLC_PROPOSAL; if (smcr_indicated(ini->smc_type_v1)) { /* add SMC-R specifics */ memcpy(pclc_base->lcl.id_for_peer, local_systemid, sizeof(local_systemid)); memcpy(pclc_base->lcl.gid, ini->ib_gid, SMC_GID_SIZE); memcpy(pclc_base->lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1], ETH_ALEN); } if (smcd_indicated(ini->smc_type_v1)) { /* add SMC-D specifics */ if (ini->ism_dev[0]) { smcd = ini->ism_dev[0]; pclc_smcd->ism.gid = htonll(smcd->ops->get_local_gid(smcd)); pclc_smcd->ism.chid = htons(smc_ism_get_chid(ini->ism_dev[0])); } } if (ini->smc_type_v2 == SMC_TYPE_N) { pclc_smcd->v2_ext_offset = 0; } else { struct smc_clc_eid_entry *ueident; u16 v2_ext_offset; v2_ext->hdr.flag.release = SMC_RELEASE; v2_ext_offset = sizeof(*pclc_smcd) - offsetofend(struct smc_clc_msg_smcd, v2_ext_offset); if (ini->smc_type_v1 != SMC_TYPE_N) v2_ext_offset += sizeof(*pclc_prfx) + pclc_prfx->ipv6_prefixes_cnt * sizeof(ipv6_prfx[0]); pclc_smcd->v2_ext_offset = htons(v2_ext_offset); plen += sizeof(*v2_ext); read_lock(&smc_clc_eid_table.lock); v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt; plen += smc_clc_eid_table.ueid_cnt * SMC_MAX_EID_LEN; i = 0; list_for_each_entry(ueident, &smc_clc_eid_table.list, list) { memcpy(v2_ext->user_eids[i++], ueident->eid, sizeof(ueident->eid)); } read_unlock(&smc_clc_eid_table.lock); } if (smcd_indicated(ini->smc_type_v2)) { u8 *eid = NULL; v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled; v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt; v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) - offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset) + v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN); smc_ism_get_system_eid(&eid); if (eid && v2_ext->hdr.flag.seid) memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN); plen += sizeof(*smcd_v2_ext); if (ini->ism_offered_cnt) { for (i = 1; i <= ini->ism_offered_cnt; i++) { smcd = ini->ism_dev[i]; 
gidchids[i - 1].gid = htonll(smcd->ops->get_local_gid(smcd)); gidchids[i - 1].chid = htons(smc_ism_get_chid(ini->ism_dev[i])); } plen += ini->ism_offered_cnt * sizeof(struct smc_clc_smcd_gid_chid); } } if (smcr_indicated(ini->smc_type_v2)) { memcpy(v2_ext->roce, ini->smcrv2.ib_gid_v2, SMC_GID_SIZE); v2_ext->max_conns = SMC_CONN_PER_LGR_PREFER; v2_ext->max_links = SMC_LINKS_PER_LGR_MAX_PREFER; } pclc_base->hdr.length = htons(plen); memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); /* send SMC Proposal CLC message */ memset(&msg, 0, sizeof(msg)); i = 0; vec[i].iov_base = pclc_base; vec[i++].iov_len = sizeof(*pclc_base); vec[i].iov_base = pclc_smcd; vec[i++].iov_len = sizeof(*pclc_smcd); if (ini->smc_type_v1 != SMC_TYPE_N) { vec[i].iov_base = pclc_prfx; vec[i++].iov_len = sizeof(*pclc_prfx); if (pclc_prfx->ipv6_prefixes_cnt > 0) { vec[i].iov_base = ipv6_prfx; vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt * sizeof(ipv6_prfx[0]); } } if (ini->smc_type_v2 != SMC_TYPE_N) { vec[i].iov_base = v2_ext; vec[i++].iov_len = sizeof(*v2_ext) + (v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN); if (smcd_indicated(ini->smc_type_v2)) { vec[i].iov_base = smcd_v2_ext; vec[i++].iov_len = sizeof(*smcd_v2_ext); if (ini->ism_offered_cnt) { vec[i].iov_base = gidchids; vec[i++].iov_len = ini->ism_offered_cnt * sizeof(struct smc_clc_smcd_gid_chid); } } } vec[i].iov_base = trl; vec[i++].iov_len = sizeof(*trl); /* due to the few bytes needed for clc-handshake this cannot block */ len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); if (len < 0) { smc->sk.sk_err = smc->clcsock->sk->sk_err; reason_code = -smc->sk.sk_err; } else if (len < ntohs(pclc_base->hdr.length)) { reason_code = -ENETUNREACH; smc->sk.sk_err = -reason_code; } kfree(pclc); return reason_code; } /* build and send CLC CONFIRM / ACCEPT message */ static int smc_clc_send_confirm_accept(struct smc_sock *smc, struct smc_clc_msg_accept_confirm_v2 *clc_v2, int first_contact, u8 version, u8 *eid, struct smc_init_info *ini) { struct smc_connection *conn = &smc->conn; struct smc_clc_first_contact_ext_v2x fce; struct smc_clc_msg_accept_confirm *clc; struct smc_clc_fce_gid_ext gle; struct smc_clc_msg_trail trl; int i, len, fce_len; struct kvec vec[5]; struct msghdr msg; /* send SMC Confirm CLC msg */ clc = (struct smc_clc_msg_accept_confirm *)clc_v2; clc->hdr.version = version; /* SMC version */ if (first_contact) clc->hdr.typev2 |= SMC_FIRST_CONTACT_MASK; if (conn->lgr->is_smcd) { /* SMC-D specific settings */ memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)); clc->hdr.typev1 = SMC_TYPE_D; clc->d0.gid = conn->lgr->smcd->ops->get_local_gid(conn->lgr->smcd); clc->d0.token = conn->rmb_desc->token; clc->d0.dmbe_size = conn->rmbe_size_comp; clc->d0.dmbe_idx = 0; memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE); if (version == SMC_V1) { clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN); } else { clc_v2->d1.chid = htons(smc_ism_get_chid(conn->lgr->smcd)); if (eid && eid[0]) memcpy(clc_v2->d1.eid, eid, SMC_MAX_EID_LEN); len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2; if (first_contact) { fce_len = smc_clc_fill_fce(&fce, ini); len += fce_len; } clc_v2->hdr.length = htons(len); } memcpy(trl.eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)); } else { struct smc_link *link = conn->lnk; /* SMC-R specific settings */ memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); clc->hdr.typev1 = SMC_TYPE_R; clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); memcpy(clc->r0.lcl.id_for_peer, local_systemid, sizeof(local_systemid)); 
memcpy(&clc->r0.lcl.gid, link->gid, SMC_GID_SIZE); memcpy(&clc->r0.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(clc->r0.qpn, link->roce_qp->qp_num); clc->r0.rmb_rkey = htonl(conn->rmb_desc->mr[link->link_idx]->rkey); clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */ clc->r0.rmbe_alert_token = htonl(conn->alert_token_local); switch (clc->hdr.type) { case SMC_CLC_ACCEPT: clc->r0.qp_mtu = link->path_mtu; break; case SMC_CLC_CONFIRM: clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu); break; } clc->r0.rmbe_size = conn->rmbe_size_comp; clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ? cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) : cpu_to_be64((u64)sg_dma_address (conn->rmb_desc->sgt[link->link_idx].sgl)); hton24(clc->r0.psn, link->psn_initial); if (version == SMC_V1) { clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN); } else { if (eid && eid[0]) memcpy(clc_v2->r1.eid, eid, SMC_MAX_EID_LEN); len = SMCR_CLC_ACCEPT_CONFIRM_LEN_V2; if (first_contact) { fce_len = smc_clc_fill_fce(&fce, ini); len += fce_len; fce.fce_v2_base.v2_direct = !link->lgr->uses_gateway; if (clc->hdr.type == SMC_CLC_CONFIRM) { memset(&gle, 0, sizeof(gle)); gle.gid_cnt = ini->smcrv2.gidlist.len; len += sizeof(gle); len += gle.gid_cnt * sizeof(gle.gid[0]); } } clc_v2->hdr.length = htons(len); } memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); } memset(&msg, 0, sizeof(msg)); i = 0; vec[i].iov_base = clc_v2; if (version > SMC_V1) vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ? SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 : SMCR_CLC_ACCEPT_CONFIRM_LEN_V2) - sizeof(trl); else vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ? SMCD_CLC_ACCEPT_CONFIRM_LEN : SMCR_CLC_ACCEPT_CONFIRM_LEN) - sizeof(trl); if (version > SMC_V1 && first_contact) { vec[i].iov_base = &fce; vec[i++].iov_len = fce_len; if (!conn->lgr->is_smcd) { if (clc->hdr.type == SMC_CLC_CONFIRM) { vec[i].iov_base = &gle; vec[i++].iov_len = sizeof(gle); vec[i].iov_base = &ini->smcrv2.gidlist.list; vec[i++].iov_len = gle.gid_cnt * sizeof(gle.gid[0]); } } } vec[i].iov_base = &trl; vec[i++].iov_len = sizeof(trl); return kernel_sendmsg(smc->clcsock, &msg, vec, 1, ntohs(clc->hdr.length)); } /* send CLC CONFIRM message across internal TCP socket */ int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact, u8 version, u8 *eid, struct smc_init_info *ini) { struct smc_clc_msg_accept_confirm_v2 cclc_v2; int reason_code = 0; int len; /* send SMC Confirm CLC msg */ memset(&cclc_v2, 0, sizeof(cclc_v2)); cclc_v2.hdr.type = SMC_CLC_CONFIRM; len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact, version, eid, ini); if (len < ntohs(cclc_v2.hdr.length)) { if (len >= 0) { reason_code = -ENETUNREACH; smc->sk.sk_err = -reason_code; } else { smc->sk.sk_err = smc->clcsock->sk->sk_err; reason_code = -smc->sk.sk_err; } } return reason_code; } /* send CLC ACCEPT message across internal TCP socket */ int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact, u8 version, u8 *negotiated_eid, struct smc_init_info *ini) { struct smc_clc_msg_accept_confirm_v2 aclc_v2; int len; memset(&aclc_v2, 0, sizeof(aclc_v2)); aclc_v2.hdr.type = SMC_CLC_ACCEPT; len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact, version, negotiated_eid, ini); if (len < ntohs(aclc_v2.hdr.length)) len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err; return len > 0 ? 
0 : len; } int smc_clc_srv_v2x_features_validate(struct smc_clc_msg_proposal *pclc, struct smc_init_info *ini) { struct smc_clc_v2_extension *pclc_v2_ext; ini->max_conns = SMC_CONN_PER_LGR_MAX; ini->max_links = SMC_LINKS_ADD_LNK_MAX; if ((!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) || ini->release_nr < SMC_RELEASE_1) return 0; pclc_v2_ext = smc_get_clc_v2_ext(pclc); if (!pclc_v2_ext) return SMC_CLC_DECL_NOV2EXT; if (ini->smcr_version & SMC_V2) { ini->max_conns = min_t(u8, pclc_v2_ext->max_conns, SMC_CONN_PER_LGR_PREFER); if (ini->max_conns < SMC_CONN_PER_LGR_MIN) return SMC_CLC_DECL_MAXCONNERR; ini->max_links = min_t(u8, pclc_v2_ext->max_links, SMC_LINKS_PER_LGR_MAX_PREFER); if (ini->max_links < SMC_LINKS_ADD_LNK_MIN) return SMC_CLC_DECL_MAXLINKERR; } return 0; } int smc_clc_clnt_v2x_features_validate(struct smc_clc_first_contact_ext *fce, struct smc_init_info *ini) { struct smc_clc_first_contact_ext_v2x *fce_v2x = (struct smc_clc_first_contact_ext_v2x *)fce; if (ini->release_nr < SMC_RELEASE_1) return 0; if (!ini->is_smcd) { if (fce_v2x->max_conns < SMC_CONN_PER_LGR_MIN) return SMC_CLC_DECL_MAXCONNERR; ini->max_conns = fce_v2x->max_conns; if (fce_v2x->max_links > SMC_LINKS_ADD_LNK_MAX || fce_v2x->max_links < SMC_LINKS_ADD_LNK_MIN) return SMC_CLC_DECL_MAXLINKERR; ini->max_links = fce_v2x->max_links; } return 0; } int smc_clc_v2x_features_confirm_check(struct smc_clc_msg_accept_confirm *cclc, struct smc_init_info *ini) { struct smc_clc_msg_accept_confirm_v2 *clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)cclc; struct smc_clc_first_contact_ext *fce = smc_get_clc_first_contact_ext(clc_v2, ini->is_smcd); struct smc_clc_first_contact_ext_v2x *fce_v2x = (struct smc_clc_first_contact_ext_v2x *)fce; if (cclc->hdr.version == SMC_V1 || !(cclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) return 0; if (ini->release_nr != fce->release) return SMC_CLC_DECL_RELEASEERR; if (fce->release < SMC_RELEASE_1) return 0; if (!ini->is_smcd) { if (fce_v2x->max_conns != ini->max_conns) return SMC_CLC_DECL_MAXCONNERR; if (fce_v2x->max_links != ini->max_links) return SMC_CLC_DECL_MAXLINKERR; } return 0; } void smc_clc_get_hostname(u8 **host) { *host = &smc_hostname[0]; } void __init smc_clc_init(void) { struct new_utsname *u; memset(smc_hostname, _S, sizeof(smc_hostname)); /* ASCII blanks */ u = utsname(); memcpy(smc_hostname, u->nodename, min_t(size_t, strlen(u->nodename), sizeof(smc_hostname))); INIT_LIST_HEAD(&smc_clc_eid_table.list); rwlock_init(&smc_clc_eid_table.lock); smc_clc_eid_table.ueid_cnt = 0; smc_clc_eid_table.seid_enabled = 1; } void smc_clc_exit(void) { smc_clc_ueid_remove(NULL); }
linux-master
net/smc/smc_clc.c
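The UEID syntax enforced by smc_clc_ueid_valid() above can also be checked outside the kernel; this is an illustrative userspace mirror of that routine (32-byte field, upper-case alphanumerics plus '.' and '-', blanks allowed only as trailing padding, first byte alphanumeric). The two sample strings are made up.

/* cc -o ueid_check ueid_check.c */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SMC_MAX_EID_LEN 32      /* fixed-size UEID field in CLC messages */

/* mirror of smc_clc_ueid_valid(): trim trailing blanks, then require
 * upper-case alphanumerics, '.' or '-', with an alphanumeric first byte
 */
static bool ueid_valid(const char *ueid)
{
    const char *end = ueid + SMC_MAX_EID_LEN;

    while (--end >= ueid && isspace((unsigned char)*end))
        ;
    if (end < ueid)                 /* all blanks */
        return false;
    if (!isalnum((unsigned char)*ueid) || islower((unsigned char)*ueid))
        return false;
    while (ueid <= end) {
        unsigned char c = *ueid;

        if ((!isalnum(c) || islower(c)) && c != '.' && c != '-')
            return false;
        ueid++;
    }
    return true;
}

int main(void)
{
    char good[SMC_MAX_EID_LEN + 1] = "EXAMPLE-UEID.1";
    char bad[SMC_MAX_EID_LEN + 1]  = "example ueid";

    /* pad to the full 32-byte field with blanks, as the kernel expects */
    memset(good + strlen(good), ' ', SMC_MAX_EID_LEN - strlen(good));
    memset(bad + strlen(bad), ' ', SMC_MAX_EID_LEN - strlen(bad));
    printf("'%.*s' -> %d\n", SMC_MAX_EID_LEN, good, ueid_valid(good));
    printf("'%.*s' -> %d\n", SMC_MAX_EID_LEN, bad, ueid_valid(bad));
    return 0;
}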
// SPDX-License-Identifier: GPL-2.0-only /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * SMC statistics netlink routines * * Copyright IBM Corp. 2021 * * Author(s): Guvenc Gulce */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/ctype.h> #include <linux/smc.h> #include <net/genetlink.h> #include <net/sock.h> #include "smc_netlink.h" #include "smc_stats.h" int smc_stats_init(struct net *net) { net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL); if (!net->smc.fback_rsn) goto err_fback; net->smc.smc_stats = alloc_percpu(struct smc_stats); if (!net->smc.smc_stats) goto err_stats; mutex_init(&net->smc.mutex_fback_rsn); return 0; err_stats: kfree(net->smc.fback_rsn); err_fback: return -ENOMEM; } void smc_stats_exit(struct net *net) { kfree(net->smc.fback_rsn); if (net->smc.smc_stats) free_percpu(net->smc.smc_stats); } static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb, struct smc_stats *stats, int tech, int type) { struct smc_stats_rmbcnt *stats_rmb_cnt; struct nlattr *attrs; if (type == SMC_NLA_STATS_T_TX_RMB_STATS) stats_rmb_cnt = &stats->smc[tech].rmb_tx; else stats_rmb_cnt = &stats->smc[tech].rmb_rx; attrs = nla_nest_start(skb, type); if (!attrs) goto errout; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT, stats_rmb_cnt->reuse_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT, stats_rmb_cnt->buf_size_small_peer_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT, stats_rmb_cnt->buf_size_small_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT, stats_rmb_cnt->buf_full_peer_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT, stats_rmb_cnt->buf_full_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT, stats_rmb_cnt->alloc_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT, stats_rmb_cnt->dgrade_cnt, SMC_NLA_STATS_RMB_PAD)) goto errattr; nla_nest_end(skb, attrs); return 0; errattr: nla_nest_cancel(skb, attrs); errout: return -EMSGSIZE; } static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb, struct smc_stats *stats, int tech, int type) { struct smc_stats_memsize *stats_pload; struct nlattr *attrs; if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE) stats_pload = &stats->smc[tech].tx_pd; else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE) stats_pload = &stats->smc[tech].rx_pd; else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE) stats_pload = &stats->smc[tech].tx_rmbsize; else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE) stats_pload = &stats->smc[tech].rx_rmbsize; else goto errout; attrs = nla_nest_start(skb, type); if (!attrs) goto errout; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K, stats_pload->buf[SMC_BUF_8K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K, stats_pload->buf[SMC_BUF_16K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K, stats_pload->buf[SMC_BUF_32K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K, stats_pload->buf[SMC_BUF_64K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K, stats_pload->buf[SMC_BUF_128K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K, stats_pload->buf[SMC_BUF_256K], 
SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K, stats_pload->buf[SMC_BUF_512K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K, stats_pload->buf[SMC_BUF_1024K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K, stats_pload->buf[SMC_BUF_G_1024K], SMC_NLA_STATS_PLOAD_PAD)) goto errattr; nla_nest_end(skb, attrs); return 0; errattr: nla_nest_cancel(skb, attrs); errout: return -EMSGSIZE; } static int smc_nl_fill_stats_tech_data(struct sk_buff *skb, struct smc_stats *stats, int tech) { struct smc_stats_tech *smc_tech; struct nlattr *attrs; smc_tech = &stats->smc[tech]; if (tech == SMC_TYPE_D) attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH); else attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH); if (!attrs) goto errout; if (smc_nl_fill_stats_rmb_data(skb, stats, tech, SMC_NLA_STATS_T_TX_RMB_STATS)) goto errattr; if (smc_nl_fill_stats_rmb_data(skb, stats, tech, SMC_NLA_STATS_T_RX_RMB_STATS)) goto errattr; if (smc_nl_fill_stats_bufsize_data(skb, stats, tech, SMC_NLA_STATS_T_TXPLOAD_SIZE)) goto errattr; if (smc_nl_fill_stats_bufsize_data(skb, stats, tech, SMC_NLA_STATS_T_RXPLOAD_SIZE)) goto errattr; if (smc_nl_fill_stats_bufsize_data(skb, stats, tech, SMC_NLA_STATS_T_TX_RMB_SIZE)) goto errattr; if (smc_nl_fill_stats_bufsize_data(skb, stats, tech, SMC_NLA_STATS_T_RX_RMB_SIZE)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC, smc_tech->clnt_v1_succ_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC, smc_tech->clnt_v2_succ_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC, smc_tech->srv_v1_succ_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC, smc_tech->srv_v2_succ_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES, smc_tech->rx_bytes, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES, smc_tech->tx_bytes, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT, smc_tech->rx_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT, smc_tech->tx_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT, 0, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT, smc_tech->cork_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT, smc_tech->ndly_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT, smc_tech->splice_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT, smc_tech->urg_data_cnt, SMC_NLA_STATS_PAD)) goto errattr; nla_nest_end(skb, attrs); return 0; errattr: nla_nest_cancel(skb, attrs); errout: return -EMSGSIZE; } int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct net *net = sock_net(skb->sk); struct smc_stats *stats; struct nlattr *attrs; int cpu, i, size; void *nlh; u64 *src; u64 *sum; if (cb_ctx->pos[0]) goto errmsg; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_STATS); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_STATS); if (!attrs) goto errnest; stats = kzalloc(sizeof(*stats), GFP_KERNEL); if (!stats) goto 
erralloc; size = sizeof(*stats) / sizeof(u64); for_each_possible_cpu(cpu) { src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu); sum = (u64 *)stats; for (i = 0; i < size; i++) *(sum++) += *(src++); } if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D)) goto errattr; if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT, stats->clnt_hshake_err_cnt, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT, stats->srv_hshake_err_cnt, SMC_NLA_STATS_PAD)) goto errattr; nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); cb_ctx->pos[0] = 1; kfree(stats); return skb->len; errattr: kfree(stats); erralloc: nla_nest_cancel(skb, attrs); errnest: genlmsg_cancel(skb, nlh); errmsg: return skb->len; } static int smc_nl_get_fback_details(struct sk_buff *skb, struct netlink_callback *cb, int pos, bool is_srv) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct net *net = sock_net(skb->sk); int cnt_reported = cb_ctx->pos[2]; struct smc_stats_fback *trgt_arr; struct nlattr *attrs; int rc = 0; void *nlh; if (is_srv) trgt_arr = &net->smc.fback_rsn->srv[0]; else trgt_arr = &net->smc.fback_rsn->clnt[0]; if (!trgt_arr[pos].fback_code) return -ENODATA; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_FBACK_STATS); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS); if (!attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv)) goto errattr; if (!cnt_reported) { if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT, net->smc.fback_rsn->srv_fback_cnt, SMC_NLA_FBACK_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT, net->smc.fback_rsn->clnt_fback_cnt, SMC_NLA_FBACK_STATS_PAD)) goto errattr; cnt_reported = 1; } if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE, trgt_arr[pos].fback_code)) goto errattr; if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT, trgt_arr[pos].count)) goto errattr; cb_ctx->pos[2] = cnt_reported; nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return rc; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct net *net = sock_net(skb->sk); int rc_srv = 0, rc_clnt = 0, k; int skip_serv = cb_ctx->pos[1]; int snum = cb_ctx->pos[0]; bool is_srv = true; mutex_lock(&net->smc.mutex_fback_rsn); for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) { if (k < snum) continue; if (!skip_serv) { rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv); if (rc_srv && rc_srv != -ENODATA) break; } else { skip_serv = 0; } rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv); if (rc_clnt && rc_clnt != -ENODATA) { skip_serv = 1; break; } if (rc_clnt == -ENODATA && rc_srv == -ENODATA) break; } mutex_unlock(&net->smc.mutex_fback_rsn); cb_ctx->pos[1] = skip_serv; cb_ctx->pos[0] = k; return skb->len; }
linux-master
net/smc/smc_stats.c
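smc_nl_get_stats() above folds the per-CPU struct smc_stats into a single total by viewing the struct as a flat array of u64 slots (size = sizeof(*stats) / sizeof(u64)), which only works because the struct consists entirely of u64 counters. A simplified userspace analogue of that folding step, with a made-up four-counter struct and invented sample values:

/* cc -o stats_sum stats_sum.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for a per-CPU stats struct made up entirely of u64 counters;
 * the field names are illustrative only
 */
struct demo_stats {
    uint64_t rx_cnt;
    uint64_t tx_cnt;
    uint64_t rx_bytes;
    uint64_t tx_bytes;
};

#define NR_CPUS 4

int main(void)
{
    struct demo_stats percpu[NR_CPUS] = {
        { 1, 2, 1500, 900 }, { 3, 1, 6000, 40 },
        { 0, 5, 0, 7200 },   { 2, 2, 800, 800 },
    };
    struct demo_stats sum;
    size_t words = sizeof(sum) / sizeof(uint64_t);

    memset(&sum, 0, sizeof(sum));
    /* same folding loop as smc_nl_get_stats(): add every u64 slot per CPU */
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        const uint64_t *src = (const uint64_t *)&percpu[cpu];
        uint64_t *dst = (uint64_t *)&sum;

        for (size_t i = 0; i < words; i++)
            dst[i] += src[i];
    }
    printf("rx_cnt=%llu tx_cnt=%llu rx_bytes=%llu tx_bytes=%llu\n",
           (unsigned long long)sum.rx_cnt, (unsigned long long)sum.tx_cnt,
           (unsigned long long)sum.rx_bytes, (unsigned long long)sum.tx_bytes);
    return 0;
}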
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Basic Transport Functions exploiting Infiniband API * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/socket.h> #include <linux/if_vlan.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/wait.h> #include <linux/reboot.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/smc.h> #include <net/tcp.h> #include <net/sock.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "smc.h" #include "smc_clc.h" #include "smc_core.h" #include "smc_ib.h" #include "smc_wr.h" #include "smc_llc.h" #include "smc_cdc.h" #include "smc_close.h" #include "smc_ism.h" #include "smc_netlink.h" #include "smc_stats.h" #include "smc_tracepoint.h" #define SMC_LGR_NUM_INCR 256 #define SMC_LGR_FREE_DELAY_SERV (600 * HZ) #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ) struct smc_lgr_list smc_lgr_list = { /* established link groups */ .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock), .list = LIST_HEAD_INIT(smc_lgr_list.list), .num = 0, }; static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted); static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc); static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft); static void smc_link_down_work(struct work_struct *work); /* return head of link group list and its lock for a given link group */ static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr, spinlock_t **lgr_lock) { if (lgr->is_smcd) { *lgr_lock = &lgr->smcd->lgr_lock; return &lgr->smcd->lgr_list; } *lgr_lock = &smc_lgr_list.lock; return &smc_lgr_list.list; } static void smc_ibdev_cnt_inc(struct smc_link *lnk) { atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]); } static void smc_ibdev_cnt_dec(struct smc_link *lnk) { atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]); } static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) { /* client link group creation always follows the server link group * creation. For client use a somewhat higher removal delay time, * otherwise there is a risk of out-of-sync link groups. */ if (!lgr->freeing) { mod_delayed_work(system_wq, &lgr->free_work, (!lgr->is_smcd && lgr->role == SMC_CLNT) ? SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); } } /* Register connection's alert token in our lookup structure. * To use rbtrees we have to implement our own insert core. * Requires @conns_lock * @smc connection to register * Returns 0 on success, != otherwise. */ static void smc_lgr_add_alert_token(struct smc_connection *conn) { struct rb_node **link, *parent = NULL; u32 token = conn->alert_token_local; link = &conn->lgr->conns_all.rb_node; while (*link) { struct smc_connection *cur = rb_entry(*link, struct smc_connection, alert_node); parent = *link; if (cur->alert_token_local > token) link = &parent->rb_left; else link = &parent->rb_right; } /* Put the new node there */ rb_link_node(&conn->alert_node, parent, link); rb_insert_color(&conn->alert_node, &conn->lgr->conns_all); } /* assign an SMC-R link to the connection */ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first) { enum smc_link_state expected = first ? 
SMC_LNK_ACTIVATING : SMC_LNK_ACTIVE; int i, j; /* do link balancing */ conn->lnk = NULL; /* reset conn->lnk first */ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &conn->lgr->lnk[i]; if (lnk->state != expected || lnk->link_is_asym) continue; if (conn->lgr->role == SMC_CLNT) { conn->lnk = lnk; /* temporary, SMC server assigns link*/ break; } if (conn->lgr->conns_num % 2) { for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) { struct smc_link *lnk2; lnk2 = &conn->lgr->lnk[j]; if (lnk2->state == expected && !lnk2->link_is_asym) { conn->lnk = lnk2; break; } } } if (!conn->lnk) conn->lnk = lnk; break; } if (!conn->lnk) return SMC_CLC_DECL_NOACTLINK; atomic_inc(&conn->lnk->conn_cnt); return 0; } /* Register connection in link group by assigning an alert token * registered in a search tree. * Requires @conns_lock * Note that '0' is a reserved value and not assigned. */ static int smc_lgr_register_conn(struct smc_connection *conn, bool first) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); static atomic_t nexttoken = ATOMIC_INIT(0); int rc; if (!conn->lgr->is_smcd) { rc = smcr_lgr_conn_assign_link(conn, first); if (rc) { conn->lgr = NULL; return rc; } } /* find a new alert_token_local value not yet used by some connection * in this link group */ sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */ while (!conn->alert_token_local) { conn->alert_token_local = atomic_inc_return(&nexttoken); if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr)) conn->alert_token_local = 0; } smc_lgr_add_alert_token(conn); conn->lgr->conns_num++; return 0; } /* Unregister connection and reset the alert token of the given connection< */ static void __smc_lgr_unregister_conn(struct smc_connection *conn) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); struct smc_link_group *lgr = conn->lgr; rb_erase(&conn->alert_node, &lgr->conns_all); if (conn->lnk) atomic_dec(&conn->lnk->conn_cnt); lgr->conns_num--; conn->alert_token_local = 0; sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */ } /* Unregister connection from lgr */ static void smc_lgr_unregister_conn(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; if (!smc_conn_lgr_valid(conn)) return; write_lock_bh(&lgr->conns_lock); if (conn->alert_token_local) { __smc_lgr_unregister_conn(conn); } write_unlock_bh(&lgr->conns_lock); } int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); char hostname[SMC_MAX_HOSTNAME_LEN + 1]; char smc_seid[SMC_MAX_EID_LEN + 1]; struct nlattr *attrs; u8 *seid = NULL; u8 *host = NULL; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_SYS_INFO); if (!nlh) goto errmsg; if (cb_ctx->pos[0]) goto errout; attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO); if (!attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2)) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE)) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable())) goto errattr; if (nla_put_u8(skb, SMC_NLA_SYS_IS_SMCR_V2, true)) goto errattr; smc_clc_get_hostname(&host); if (host) { memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN); hostname[SMC_MAX_HOSTNAME_LEN] = 0; if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname)) goto errattr; } if (smc_ism_is_v2_capable()) { smc_ism_get_system_eid(&seid); memcpy(smc_seid, seid, SMC_MAX_EID_LEN); smc_seid[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, 
SMC_NLA_SYS_SEID, smc_seid)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); cb_ctx->pos[0] = 1; return skb->len; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return skb->len; } /* Fill SMC_NLA_LGR_D_V2_COMMON/SMC_NLA_LGR_R_V2_COMMON nested attributes */ static int smc_nl_fill_lgr_v2_common(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb, struct nlattr *v2_attrs) { char smc_host[SMC_MAX_HOSTNAME_LEN + 1]; char smc_eid[SMC_MAX_EID_LEN + 1]; if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os)) goto errv2attr; memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN); smc_host[SMC_MAX_HOSTNAME_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host)) goto errv2attr; memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN); smc_eid[SMC_MAX_EID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid)) goto errv2attr; nla_nest_end(skb, v2_attrs); return 0; errv2attr: nla_nest_cancel(skb, v2_attrs); return -EMSGSIZE; } static int smc_nl_fill_smcr_lgr_v2(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *v2_attrs; v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2); if (!v2_attrs) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_DIRECT, !lgr->uses_gateway)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_CONNS, lgr->max_conns)) goto errv2attr; if (nla_put_u8(skb, SMC_NLA_LGR_R_V2_MAX_LINKS, lgr->max_links)) goto errv2attr; nla_nest_end(skb, v2_attrs); return 0; errv2attr: nla_nest_cancel(skb, v2_attrs); errattr: return -EMSGSIZE; } static int smc_nl_fill_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { char smc_target[SMC_MAX_PNETID_LEN + 1]; struct nlattr *attrs, *v2_attrs; attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR); if (!attrs) goto errout; if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id))) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_BUF_TYPE, lgr->buf_type)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE, lgr->net->net_cookie, SMC_NLA_LGR_R_PAD)) goto errattr; memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN); smc_target[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target)) goto errattr; if (lgr->smc_version > SMC_V1) { v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_R_V2_COMMON); if (!v2_attrs) goto errattr; if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) goto errattr; if (smc_nl_fill_smcr_lgr_v2(lgr, skb, cb)) goto errattr; } nla_nest_end(skb, attrs); return 0; errattr: nla_nest_cancel(skb, attrs); errout: return -EMSGSIZE; } static int smc_nl_fill_lgr_link(struct smc_link_group *lgr, struct smc_link *link, struct sk_buff *skb, struct netlink_callback *cb) { char smc_ibname[IB_DEVICE_NAME_MAX]; u8 smc_gid_target[41]; struct nlattr *attrs; u32 link_uid = 0; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LINK_SMCR); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR); if (!attrs) goto errout; if (nla_put_u8(skb, 
SMC_NLA_LINK_ID, link->link_id)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT, atomic_read(&link->conn_cnt))) goto errattr; if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx)) goto errattr; snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname); if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname)) goto errattr; memcpy(&link_uid, link->link_uid, sizeof(link_uid)); if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid)) goto errattr; memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid)); if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid)) goto errattr; memset(smc_gid_target, 0, sizeof(smc_gid_target)); smc_gid_be16_convert(smc_gid_target, link->gid); if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target)) goto errattr; memset(smc_gid_target, 0, sizeof(smc_gid_target)); smc_gid_be16_convert(smc_gid_target, link->peer_gid); if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target)) goto errattr; nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static int smc_nl_handle_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb, bool list_links) { void *nlh; int i; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LGR_SMCR); if (!nlh) goto errmsg; if (smc_nl_fill_lgr(lgr, skb, cb)) goto errout; genlmsg_end(skb, nlh); if (!list_links) goto out; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_usable(&lgr->lnk[i])) continue; if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb)) goto errout; } out: return 0; errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr, struct sk_buff *skb, struct netlink_callback *cb, bool list_links) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_link_group *lgr; int snum = cb_ctx->pos[0]; int num = 0; spin_lock_bh(&smc_lgr->lock); list_for_each_entry(lgr, &smc_lgr->list, list) { if (num < snum) goto next; if (smc_nl_handle_lgr(lgr, skb, cb, list_links)) goto errout; next: num++; } errout: spin_unlock_bh(&smc_lgr->lock); cb_ctx->pos[0] = num; } static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr, struct sk_buff *skb, struct netlink_callback *cb) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct smcd_dev *smcd = lgr->smcd; struct nlattr *attrs; void *nlh; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_LGR_SMCD); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD); if (!attrs) goto errout; if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id))) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, smcd->ops->get_local_gid(smcd), SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid, SMC_NLA_LGR_D_PAD)) goto errattr; if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num)) goto errattr; if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd))) goto errattr; memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet)) goto errattr; if (lgr->smc_version > SMC_V1) { struct nlattr 
*v2_attrs; v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_D_V2_COMMON); if (!v2_attrs) goto errattr; if (smc_nl_fill_lgr_v2_common(lgr, skb, cb, v2_attrs)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_link_group *lgr; int snum = cb_ctx->pos[1]; int rc = 0, num = 0; spin_lock_bh(&dev->lgr_lock); list_for_each_entry(lgr, &dev->lgr_list, list) { if (!lgr->is_smcd) continue; if (num < snum) goto next; rc = smc_nl_fill_smcd_lgr(lgr, skb, cb); if (rc) goto errout; next: num++; } errout: spin_unlock_bh(&dev->lgr_lock); cb_ctx->pos[1] = num; return rc; } static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smcd_dev *smcd_dev; int snum = cb_ctx->pos[0]; int rc = 0, num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcd_dev, &dev_list->list, list) { if (list_empty(&smcd_dev->lgr_list)) continue; if (num < snum) goto next; rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb); if (rc) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; return rc; } int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) { bool list_links = false; smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links); return skb->len; } int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb) { bool list_links = true; smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links); return skb->len; } int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb); return skb->len; } void smc_lgr_cleanup_early(struct smc_link_group *lgr) { spinlock_t *lgr_lock; if (!lgr) return; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); /* do not use this link group for new connections */ if (!list_empty(&lgr->list)) list_del_init(&lgr->list); spin_unlock_bh(lgr_lock); __smc_lgr_terminate(lgr, true); } static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr) { int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (smc_link_sendable(lnk)) lnk->state = SMC_LNK_INACTIVE; } wake_up_all(&lgr->llc_msg_waiter); wake_up_all(&lgr->llc_flow_waiter); } static void smc_lgr_free(struct smc_link_group *lgr); static void smc_lgr_free_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(to_delayed_work(work), struct smc_link_group, free_work); spinlock_t *lgr_lock; bool conns; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); if (lgr->freeing) { spin_unlock_bh(lgr_lock); return; } read_lock_bh(&lgr->conns_lock); conns = RB_EMPTY_ROOT(&lgr->conns_all); read_unlock_bh(&lgr->conns_lock); if (!conns) { /* number of lgr connections is no longer zero */ spin_unlock_bh(lgr_lock); return; } list_del_init(&lgr->list); /* remove from smc_lgr_list */ lgr->freeing = 1; /* this instance does the freeing, no new schedule */ spin_unlock_bh(lgr_lock); cancel_delayed_work(&lgr->free_work); if (!lgr->is_smcd && !lgr->terminating) smc_llc_send_link_delete_all(lgr, true, SMC_LLC_DEL_PROG_INIT_TERM); if (lgr->is_smcd && !lgr->terminating) smc_ism_signal_shutdown(lgr); if (!lgr->is_smcd) smcr_lgr_link_deactivate_all(lgr); smc_lgr_free(lgr); } static void 
smc_lgr_terminate_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, terminate_work); __smc_lgr_terminate(lgr, true); } /* return next unique link id for the lgr */ static u8 smcr_next_link_id(struct smc_link_group *lgr) { u8 link_id; int i; while (1) { again: link_id = ++lgr->next_link_id; if (!link_id) /* skip zero as link_id */ link_id = ++lgr->next_link_id; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (smc_link_usable(&lgr->lnk[i]) && lgr->lnk[i].link_id == link_id) goto again; } break; } return link_id; } static void smcr_copy_dev_info_to_link(struct smc_link *link) { struct smc_ib_device *smcibdev = link->smcibdev; snprintf(link->ibname, sizeof(link->ibname), "%s", smcibdev->ibdev->name); link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1]; } int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, u8 link_idx, struct smc_init_info *ini) { struct smc_ib_device *smcibdev; u8 rndvec[3]; int rc; if (lgr->smc_version == SMC_V2) { lnk->smcibdev = ini->smcrv2.ib_dev_v2; lnk->ibport = ini->smcrv2.ib_port_v2; } else { lnk->smcibdev = ini->ib_dev; lnk->ibport = ini->ib_port; } get_device(&lnk->smcibdev->ibdev->dev); atomic_inc(&lnk->smcibdev->lnk_cnt); refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */ lnk->clearing = 0; lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu; lnk->link_id = smcr_next_link_id(lgr); lnk->lgr = lgr; smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ lnk->link_idx = link_idx; lnk->wr_rx_id_compl = 0; smc_ibdev_cnt_inc(lnk); smcr_copy_dev_info_to_link(lnk); atomic_set(&lnk->conn_cnt, 0); smc_llc_link_set_uid(lnk); INIT_WORK(&lnk->link_down_wrk, smc_link_down_work); if (!lnk->smcibdev->initialized) { rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev); if (rc) goto out; } get_random_bytes(rndvec, sizeof(rndvec)); lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16); rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport, ini->vlan_id, lnk->gid, &lnk->sgid_index, lgr->smc_version == SMC_V2 ? 
&ini->smcrv2 : NULL); if (rc) goto out; rc = smc_llc_link_init(lnk); if (rc) goto out; rc = smc_wr_alloc_link_mem(lnk); if (rc) goto clear_llc_lnk; rc = smc_ib_create_protection_domain(lnk); if (rc) goto free_link_mem; rc = smc_ib_create_queue_pair(lnk); if (rc) goto dealloc_pd; rc = smc_wr_create_link(lnk); if (rc) goto destroy_qp; lnk->state = SMC_LNK_ACTIVATING; return 0; destroy_qp: smc_ib_destroy_queue_pair(lnk); dealloc_pd: smc_ib_dealloc_protection_domain(lnk); free_link_mem: smc_wr_free_link_mem(lnk); clear_llc_lnk: smc_llc_link_clear(lnk, false); out: smc_ibdev_cnt_dec(lnk); put_device(&lnk->smcibdev->ibdev->dev); smcibdev = lnk->smcibdev; memset(lnk, 0, sizeof(struct smc_link)); lnk->state = SMC_LNK_UNUSED; if (!atomic_dec_return(&smcibdev->lnk_cnt)) wake_up(&smcibdev->lnks_deleted); smc_lgr_put(lgr); /* lgr_hold above */ return rc; } /* create a new SMC link group */ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) { struct smc_link_group *lgr; struct list_head *lgr_list; struct smcd_dev *smcd; struct smc_link *lnk; spinlock_t *lgr_lock; u8 link_idx; int rc = 0; int i; if (ini->is_smcd && ini->vlan_id) { if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id)) { rc = SMC_CLC_DECL_ISMVLANERR; goto out; } } lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { rc = SMC_CLC_DECL_MEM; goto ism_put_vlan; } lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0, SMC_LGR_ID_SIZE, &lgr->id); if (!lgr->tx_wq) { rc = -ENOMEM; goto free_lgr; } lgr->is_smcd = ini->is_smcd; lgr->sync_err = 0; lgr->terminating = 0; lgr->freeing = 0; lgr->vlan_id = ini->vlan_id; refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */ init_rwsem(&lgr->sndbufs_lock); init_rwsem(&lgr->rmbs_lock); rwlock_init(&lgr->conns_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { INIT_LIST_HEAD(&lgr->sndbufs[i]); INIT_LIST_HEAD(&lgr->rmbs[i]); } lgr->next_link_id = 0; smc_lgr_list.num += SMC_LGR_NUM_INCR; memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE); INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work); INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work); lgr->conns_all = RB_ROOT; if (ini->is_smcd) { /* SMC-D specific settings */ smcd = ini->ism_dev[ini->ism_selected]; get_device(smcd->ops->get_dev(smcd)); lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected]; lgr->smcd = ini->ism_dev[ini->ism_selected]; lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list; lgr_lock = &lgr->smcd->lgr_lock; lgr->smc_version = ini->smcd_version; lgr->peer_shutdown = 0; atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt); } else { /* SMC-R specific settings */ struct smc_ib_device *ibdev; int ibport; lgr->role = smc->listen_smc ? 
SMC_SERV : SMC_CLNT; lgr->smc_version = ini->smcr_version; memcpy(lgr->peer_systemid, ini->peer_systemid, SMC_SYSTEMID_LEN); if (lgr->smc_version == SMC_V2) { ibdev = ini->smcrv2.ib_dev_v2; ibport = ini->smcrv2.ib_port_v2; lgr->saddr = ini->smcrv2.saddr; lgr->uses_gateway = ini->smcrv2.uses_gateway; memcpy(lgr->nexthop_mac, ini->smcrv2.nexthop_mac, ETH_ALEN); lgr->max_conns = ini->max_conns; lgr->max_links = ini->max_links; } else { ibdev = ini->ib_dev; ibport = ini->ib_port; lgr->max_conns = SMC_CONN_PER_LGR_MAX; lgr->max_links = SMC_LINKS_ADD_LNK_MAX; } memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1], SMC_MAX_PNETID_LEN); rc = smc_wr_alloc_lgr_mem(lgr); if (rc) goto free_wq; smc_llc_lgr_init(lgr, smc); link_idx = SMC_SINGLE_LINK; lnk = &lgr->lnk[link_idx]; rc = smcr_link_init(lgr, lnk, link_idx, ini); if (rc) { smc_wr_free_lgr_mem(lgr); goto free_wq; } lgr->net = smc_ib_net(lnk->smcibdev); lgr_list = &smc_lgr_list.list; lgr_lock = &smc_lgr_list.lock; lgr->buf_type = lgr->net->smc.sysctl_smcr_buf_type; atomic_inc(&lgr_cnt); } smc->conn.lgr = lgr; spin_lock_bh(lgr_lock); list_add_tail(&lgr->list, lgr_list); spin_unlock_bh(lgr_lock); return 0; free_wq: destroy_workqueue(lgr->tx_wq); free_lgr: kfree(lgr); ism_put_vlan: if (ini->is_smcd && ini->vlan_id) smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id); out: if (rc < 0) { if (rc == -ENOMEM) rc = SMC_CLC_DECL_MEM; else rc = SMC_CLC_DECL_INTERR; } return rc; } static int smc_write_space(struct smc_connection *conn) { int buffer_len = conn->peer_rmbe_size; union smc_host_cursor prod; union smc_host_cursor cons; int space; smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn); smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); /* determine rx_buf space */ space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod); return space; } static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend, struct smc_wr_buf *wr_buf) { struct smc_connection *conn = &smc->conn; union smc_host_cursor cons, fin; int rc = 0; int diff; smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn); smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn); /* set prod cursor to old state, enforce tx_rdma_writes() */ smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn); smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn); if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) { /* cons cursor advanced more than fin, and prod was set * fin above, so now prod is smaller than cons. Fix that. 
*/ diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons); smc_curs_add(conn->sndbuf_desc->len, &conn->tx_curs_sent, diff); smc_curs_add(conn->sndbuf_desc->len, &conn->tx_curs_fin, diff); smp_mb__before_atomic(); atomic_add(diff, &conn->sndbuf_space); smp_mb__after_atomic(); smc_curs_add(conn->peer_rmbe_size, &conn->local_tx_ctrl.prod, diff); smc_curs_add(conn->peer_rmbe_size, &conn->local_tx_ctrl_fin, diff); } /* recalculate, value is used by tx_rdma_writes() */ atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn)); if (smc->sk.sk_state != SMC_INIT && smc->sk.sk_state != SMC_CLOSED) { rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf); if (!rc) { queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0); smc->sk.sk_data_ready(&smc->sk); } } else { smc_wr_tx_put_slot(conn->lnk, (struct smc_wr_tx_pend_priv *)pend); } return rc; } void smc_switch_link_and_count(struct smc_connection *conn, struct smc_link *to_lnk) { atomic_dec(&conn->lnk->conn_cnt); /* link_hold in smc_conn_create() */ smcr_link_put(conn->lnk); conn->lnk = to_lnk; atomic_inc(&conn->lnk->conn_cnt); /* link_put in smc_conn_free() */ smcr_link_hold(conn->lnk); } struct smc_link *smc_switch_conns(struct smc_link_group *lgr, struct smc_link *from_lnk, bool is_dev_err) { struct smc_link *to_lnk = NULL; struct smc_cdc_tx_pend *pend; struct smc_connection *conn; struct smc_wr_buf *wr_buf; struct smc_sock *smc; struct rb_node *node; int i, rc = 0; /* link is inactive, wake up tx waiters */ smc_wr_wakeup_tx_wait(from_lnk); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx) continue; if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev && from_lnk->ibport == lgr->lnk[i].ibport) { continue; } to_lnk = &lgr->lnk[i]; break; } if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) { smc_lgr_terminate_sched(lgr); return NULL; } again: read_lock_bh(&lgr->conns_lock); for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) { conn = rb_entry(node, struct smc_connection, alert_node); if (conn->lnk != from_lnk) continue; smc = container_of(conn, struct smc_sock, conn); /* conn->lnk not yet set in SMC_INIT state */ if (smc->sk.sk_state == SMC_INIT) continue; if (smc->sk.sk_state == SMC_CLOSED || smc->sk.sk_state == SMC_PEERCLOSEWAIT1 || smc->sk.sk_state == SMC_PEERCLOSEWAIT2 || smc->sk.sk_state == SMC_APPFINCLOSEWAIT || smc->sk.sk_state == SMC_APPCLOSEWAIT1 || smc->sk.sk_state == SMC_APPCLOSEWAIT2 || smc->sk.sk_state == SMC_PEERFINCLOSEWAIT || smc->sk.sk_state == SMC_PEERABORTWAIT || smc->sk.sk_state == SMC_PROCESSABORT) { spin_lock_bh(&conn->send_lock); smc_switch_link_and_count(conn, to_lnk); spin_unlock_bh(&conn->send_lock); continue; } sock_hold(&smc->sk); read_unlock_bh(&lgr->conns_lock); /* pre-fetch buffer outside of send_lock, might sleep */ rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend); if (rc) goto err_out; /* avoid race with smcr_tx_sndbuf_nonempty() */ spin_lock_bh(&conn->send_lock); smc_switch_link_and_count(conn, to_lnk); rc = smc_switch_cursor(smc, pend, wr_buf); spin_unlock_bh(&conn->send_lock); sock_put(&smc->sk); if (rc) goto err_out; goto again; } read_unlock_bh(&lgr->conns_lock); smc_wr_tx_link_put(to_lnk); return to_lnk; err_out: smcr_link_down_cond_sched(to_lnk); smc_wr_tx_link_put(to_lnk); return NULL; } static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link_group *lgr) { struct rw_semaphore *lock; /* lock buffer list */ int rc; if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) { /* 
unregister rmb with peer */ rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY); if (!rc) { /* protect against smc_llc_cli_rkey_exchange() */ down_read(&lgr->llc_conf_mutex); smc_llc_do_delete_rkey(lgr, buf_desc); buf_desc->is_conf_rkey = false; up_read(&lgr->llc_conf_mutex); smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } } if (buf_desc->is_reg_err) { /* buf registration failed, reuse not possible */ lock = is_rmb ? &lgr->rmbs_lock : &lgr->sndbufs_lock; down_write(lock); list_del(&buf_desc->list); up_write(lock); smc_buf_free(lgr, is_rmb, buf_desc); } else { /* memzero_explicit provides potential memory barrier semantics */ memzero_explicit(buf_desc->cpu_addr, buf_desc->len); WRITE_ONCE(buf_desc->used, 0); } } static void smc_buf_unuse(struct smc_connection *conn, struct smc_link_group *lgr) { if (conn->sndbuf_desc) { if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) { smcr_buf_unuse(conn->sndbuf_desc, false, lgr); } else { memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len); WRITE_ONCE(conn->sndbuf_desc->used, 0); } } if (conn->rmb_desc) { if (!lgr->is_smcd) { smcr_buf_unuse(conn->rmb_desc, true, lgr); } else { memzero_explicit(conn->rmb_desc->cpu_addr, conn->rmb_desc->len + sizeof(struct smcd_cdc_msg)); WRITE_ONCE(conn->rmb_desc->used, 0); } } } /* remove a finished connection from its link group */ void smc_conn_free(struct smc_connection *conn) { struct smc_link_group *lgr = conn->lgr; if (!lgr || conn->freed) /* Connection has never been registered in a * link group, or has already been freed. */ return; conn->freed = 1; if (!smc_conn_lgr_valid(conn)) /* Connection has already unregistered from * link group. */ goto lgr_put; if (lgr->is_smcd) { if (!list_empty(&lgr->list)) smc_ism_unset_conn(conn); tasklet_kill(&conn->rx_tsklet); } else { smc_cdc_wait_pend_tx_wr(conn); if (current_work() != &conn->abort_work) cancel_work_sync(&conn->abort_work); } if (!list_empty(&lgr->list)) { smc_buf_unuse(conn, lgr); /* allow buffer reuse */ smc_lgr_unregister_conn(conn); } if (!lgr->conns_num) smc_lgr_schedule_free_work(lgr); lgr_put: if (!lgr->is_smcd) smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */ smc_lgr_put(lgr); /* lgr_hold in smc_conn_create() */ } /* unregister a link from a buf_desc */ static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link *lnk) { if (is_rmb || buf_desc->is_vm) buf_desc->is_reg_mr[lnk->link_idx] = false; if (!buf_desc->is_map_ib[lnk->link_idx]) return; if ((is_rmb || buf_desc->is_vm) && buf_desc->mr[lnk->link_idx]) { smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]); buf_desc->mr[lnk->link_idx] = NULL; } if (is_rmb) smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE); else smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE); sg_free_table(&buf_desc->sgt[lnk->link_idx]); buf_desc->is_map_ib[lnk->link_idx] = false; } /* unmap all buffers of lgr for a deleted link */ static void smcr_buf_unmap_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_buf_desc *buf_desc, *bf; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { down_write(&lgr->rmbs_lock); list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) smcr_buf_unmap_link(buf_desc, true, lnk); up_write(&lgr->rmbs_lock); down_write(&lgr->sndbufs_lock); list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) smcr_buf_unmap_link(buf_desc, false, lnk); up_write(&lgr->sndbufs_lock); } } static void smcr_rtoken_clear_link(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) 
{ lgr->rtokens[i][lnk->link_idx].rkey = 0; lgr->rtokens[i][lnk->link_idx].dma_addr = 0; } } static void __smcr_link_clear(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_ib_device *smcibdev; smc_wr_free_link_mem(lnk); smc_ibdev_cnt_dec(lnk); put_device(&lnk->smcibdev->ibdev->dev); smcibdev = lnk->smcibdev; memset(lnk, 0, sizeof(struct smc_link)); lnk->state = SMC_LNK_UNUSED; if (!atomic_dec_return(&smcibdev->lnk_cnt)) wake_up(&smcibdev->lnks_deleted); smc_lgr_put(lgr); /* lgr_hold in smcr_link_init() */ } /* must be called under lgr->llc_conf_mutex lock */ void smcr_link_clear(struct smc_link *lnk, bool log) { if (!lnk->lgr || lnk->clearing || lnk->state == SMC_LNK_UNUSED) return; lnk->clearing = 1; lnk->peer_qpn = 0; smc_llc_link_clear(lnk, log); smcr_buf_unmap_lgr(lnk); smcr_rtoken_clear_link(lnk); smc_ib_modify_qp_error(lnk); smc_wr_free_link(lnk); smc_ib_destroy_queue_pair(lnk); smc_ib_dealloc_protection_domain(lnk); smcr_link_put(lnk); /* theoretically last link_put */ } void smcr_link_hold(struct smc_link *lnk) { refcount_inc(&lnk->refcnt); } void smcr_link_put(struct smc_link *lnk) { if (refcount_dec_and_test(&lnk->refcnt)) __smcr_link_clear(lnk); } static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc) { int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]); if (!buf_desc->is_vm && buf_desc->pages) __free_pages(buf_desc->pages, buf_desc->order); else if (buf_desc->is_vm && buf_desc->cpu_addr) vfree(buf_desc->cpu_addr); kfree(buf_desc); } static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb, struct smc_buf_desc *buf_desc) { if (is_dmb) { /* restore original buf len */ buf_desc->len += sizeof(struct smcd_cdc_msg); smc_ism_unregister_dmb(lgr->smcd, buf_desc); } else { kfree(buf_desc->cpu_addr); } kfree(buf_desc); } static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc) { if (lgr->is_smcd) smcd_buf_free(lgr, is_rmb, buf_desc); else smcr_buf_free(lgr, is_rmb, buf_desc); } static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf_desc; struct list_head *buf_list; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { if (is_rmb) buf_list = &lgr->rmbs[i]; else buf_list = &lgr->sndbufs[i]; list_for_each_entry_safe(buf_desc, bf_desc, buf_list, list) { list_del(&buf_desc->list); smc_buf_free(lgr, is_rmb, buf_desc); } } } static void smc_lgr_free_bufs(struct smc_link_group *lgr) { /* free send buffers */ __smc_lgr_free_bufs(lgr, false); /* free rmbs */ __smc_lgr_free_bufs(lgr, true); } /* won't be freed until no one accesses to lgr anymore */ static void __smc_lgr_free(struct smc_link_group *lgr) { smc_lgr_free_bufs(lgr); if (lgr->is_smcd) { if (!atomic_dec_return(&lgr->smcd->lgr_cnt)) wake_up(&lgr->smcd->lgrs_deleted); } else { smc_wr_free_lgr_mem(lgr); if (!atomic_dec_return(&lgr_cnt)) wake_up(&lgrs_deleted); } kfree(lgr); } /* remove a link group */ static void smc_lgr_free(struct smc_link_group *lgr) { int i; if (!lgr->is_smcd) { down_write(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state != SMC_LNK_UNUSED) smcr_link_clear(&lgr->lnk[i], false); } up_write(&lgr->llc_conf_mutex); smc_llc_lgr_clear(lgr); } destroy_workqueue(lgr->tx_wq); if (lgr->is_smcd) { smc_ism_put_vlan(lgr->smcd, lgr->vlan_id); put_device(lgr->smcd->ops->get_dev(lgr->smcd)); } smc_lgr_put(lgr); /* theoretically last lgr_put */ } void smc_lgr_hold(struct smc_link_group 
*lgr) { refcount_inc(&lgr->refcnt); } void smc_lgr_put(struct smc_link_group *lgr) { if (refcount_dec_and_test(&lgr->refcnt)) __smc_lgr_free(lgr); } static void smc_sk_wake_ups(struct smc_sock *smc) { smc->sk.sk_write_space(&smc->sk); smc->sk.sk_data_ready(&smc->sk); smc->sk.sk_state_change(&smc->sk); } /* kill a connection */ static void smc_conn_kill(struct smc_connection *conn, bool soft) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); if (conn->lgr->is_smcd && conn->lgr->peer_shutdown) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; else smc_close_abort(conn); conn->killed = 1; smc->sk.sk_err = ECONNABORTED; smc_sk_wake_ups(smc); if (conn->lgr->is_smcd) { smc_ism_unset_conn(conn); if (soft) tasklet_kill(&conn->rx_tsklet); else tasklet_unlock_wait(&conn->rx_tsklet); } else { smc_cdc_wait_pend_tx_wr(conn); } smc_lgr_unregister_conn(conn); smc_close_active_abort(smc); } static void smc_lgr_cleanup(struct smc_link_group *lgr) { if (lgr->is_smcd) { smc_ism_signal_shutdown(lgr); } else { u32 rsn = lgr->llc_termination_rsn; if (!rsn) rsn = SMC_LLC_DEL_PROG_INIT_TERM; smc_llc_send_link_delete_all(lgr, false, rsn); smcr_lgr_link_deactivate_all(lgr); } } /* terminate link group * @soft: true if link group shutdown can take its time * false if immediate link group shutdown is required */ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft) { struct smc_connection *conn; struct smc_sock *smc; struct rb_node *node; if (lgr->terminating) return; /* lgr already terminating */ /* cancel free_work sync, will terminate when lgr->freeing is set */ cancel_delayed_work(&lgr->free_work); lgr->terminating = 1; /* kill remaining link group connections */ read_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); while (node) { read_unlock_bh(&lgr->conns_lock); conn = rb_entry(node, struct smc_connection, alert_node); smc = container_of(conn, struct smc_sock, conn); sock_hold(&smc->sk); /* sock_put below */ lock_sock(&smc->sk); smc_conn_kill(conn, soft); release_sock(&smc->sk); sock_put(&smc->sk); /* sock_hold above */ read_lock_bh(&lgr->conns_lock); node = rb_first(&lgr->conns_all); } read_unlock_bh(&lgr->conns_lock); smc_lgr_cleanup(lgr); smc_lgr_free(lgr); } /* unlink link group and schedule termination */ void smc_lgr_terminate_sched(struct smc_link_group *lgr) { spinlock_t *lgr_lock; smc_lgr_list_head(lgr, &lgr_lock); spin_lock_bh(lgr_lock); if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) { spin_unlock_bh(lgr_lock); return; /* lgr already terminating */ } list_del_init(&lgr->list); lgr->freeing = 1; spin_unlock_bh(lgr_lock); schedule_work(&lgr->terminate_work); } /* Called when peer lgr shutdown (regularly or abnormally) is received */ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan) { struct smc_link_group *lgr, *l; LIST_HEAD(lgr_free_list); /* run common cleanup function and build free list */ spin_lock_bh(&dev->lgr_lock); list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) { if ((!peer_gid || lgr->peer_gid == peer_gid) && (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) { if (peer_gid) /* peer triggered termination */ lgr->peer_shutdown = 1; list_move(&lgr->list, &lgr_free_list); lgr->freeing = 1; } } spin_unlock_bh(&dev->lgr_lock); /* cancel the regular free workers and actually free lgrs */ list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { list_del_init(&lgr->list); schedule_work(&lgr->terminate_work); } } /* Called when an SMCD device is removed or the smc module is unloaded */ void 
smc_smcd_terminate_all(struct smcd_dev *smcd) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); spin_lock_bh(&smcd->lgr_lock); list_splice_init(&smcd->lgr_list, &lgr_free_list); list_for_each_entry(lgr, &lgr_free_list, list) lgr->freeing = 1; spin_unlock_bh(&smcd->lgr_lock); list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) { list_del_init(&lgr->list); __smc_lgr_terminate(lgr, false); } if (atomic_read(&smcd->lgr_cnt)) wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt)); } /* Called when an SMCR device is removed or the smc module is unloaded. * If smcibdev is given, all SMCR link groups using this device are terminated. * If smcibdev is NULL, all SMCR link groups are terminated. */ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); int i; spin_lock_bh(&smc_lgr_list.lock); if (!smcibdev) { list_splice_init(&smc_lgr_list.list, &lgr_free_list); list_for_each_entry(lgr, &lgr_free_list, list) lgr->freeing = 1; } else { list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].smcibdev == smcibdev) smcr_link_down_cond_sched(&lgr->lnk[i]); } } } spin_unlock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) { list_del_init(&lgr->list); smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM); __smc_lgr_terminate(lgr, false); } if (smcibdev) { if (atomic_read(&smcibdev->lnk_cnt)) wait_event(smcibdev->lnks_deleted, !atomic_read(&smcibdev->lnk_cnt)); } else { if (atomic_read(&lgr_cnt)) wait_event(lgrs_deleted, !atomic_read(&lgr_cnt)); } } /* set new lgr type and clear all asymmetric link tagging */ void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type) { char *lgr_type = ""; int i; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) if (smc_link_usable(&lgr->lnk[i])) lgr->lnk[i].link_is_asym = false; if (lgr->type == new_type) return; lgr->type = new_type; switch (lgr->type) { case SMC_LGR_NONE: lgr_type = "NONE"; break; case SMC_LGR_SINGLE: lgr_type = "SINGLE"; break; case SMC_LGR_SYMMETRIC: lgr_type = "SYMMETRIC"; break; case SMC_LGR_ASYMMETRIC_PEER: lgr_type = "ASYMMETRIC_PEER"; break; case SMC_LGR_ASYMMETRIC_LOCAL: lgr_type = "ASYMMETRIC_LOCAL"; break; } pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: " "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie, lgr_type, lgr->pnet_id); } /* set new lgr type and tag a link as asymmetric */ void smcr_lgr_set_type_asym(struct smc_link_group *lgr, enum smc_lgr_type new_type, int asym_lnk_idx) { smcr_lgr_set_type(lgr, new_type); lgr->lnk[asym_lnk_idx].link_is_asym = true; } /* abort connection, abort_work scheduled from tasklet context */ static void smc_conn_abort_work(struct work_struct *work) { struct smc_connection *conn = container_of(work, struct smc_connection, abort_work); struct smc_sock *smc = container_of(conn, struct smc_sock, conn); lock_sock(&smc->sk); smc_conn_kill(conn, true); release_sock(&smc->sk); sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ } void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr, *n; spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { struct smc_link *link; if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN) || lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER || !rdma_dev_access_netns(smcibdev->ibdev, lgr->net)) continue; if 
(lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) continue; /* trigger local add link processing */ link = smc_llc_usable_link(lgr); if (link) smc_llc_add_link_local(link); } spin_unlock_bh(&smc_lgr_list.lock); } /* link is down - switch connections to alternate link, * must be called under lgr->llc_conf_mutex lock */ static void smcr_link_down(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_link *to_lnk; int del_link_id; if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list)) return; to_lnk = smc_switch_conns(lgr, lnk, true); if (!to_lnk) { /* no backup link available */ smcr_link_clear(lnk, true); return; } smcr_lgr_set_type(lgr, SMC_LGR_SINGLE); del_link_id = lnk->link_id; if (lgr->role == SMC_SERV) { /* trigger local delete link processing */ smc_llc_srv_delete_link_local(to_lnk, del_link_id); } else { if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* another llc task is ongoing */ up_write(&lgr->llc_conf_mutex); wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) || lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), SMC_LLC_WAIT_TIME); down_write(&lgr->llc_conf_mutex); } if (!list_empty(&lgr->list)) { smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true, SMC_LLC_DEL_LOST_PATH); smcr_link_clear(lnk, true); } wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ } } /* must be called under lgr->llc_conf_mutex lock */ void smcr_link_down_cond(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); smcr_link_down(lnk); } } /* will get the lgr->llc_conf_mutex lock */ void smcr_link_down_cond_sched(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); schedule_work(&lnk->link_down_wrk); } } void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr, *n; int i; list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN)) continue; /* lgr is not affected */ if (list_empty(&lgr->list)) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (smc_link_usable(lnk) && lnk->smcibdev == smcibdev && lnk->ibport == ibport) smcr_link_down_cond_sched(lnk); } } } static void smc_link_down_work(struct work_struct *work) { struct smc_link *link = container_of(work, struct smc_link, link_down_wrk); struct smc_link_group *lgr = link->lgr; if (list_empty(&lgr->list)) return; wake_up_all(&lgr->llc_msg_waiter); down_write(&lgr->llc_conf_mutex); smcr_link_down(link); up_write(&lgr->llc_conf_mutex); } static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, struct netdev_nested_priv *priv) { unsigned short *vlan_id = (unsigned short *)priv->data; if (is_vlan_dev(lower_dev)) { *vlan_id = vlan_dev_vlan_id(lower_dev); return 1; } return 0; } /* Determine vlan of internal TCP socket. 
*/ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(clcsock->sk); struct netdev_nested_priv priv; struct net_device *ndev; int rc = 0; ini->vlan_id = 0; if (!dst) { rc = -ENOTCONN; goto out; } if (!dst->dev) { rc = -ENODEV; goto out_rel; } ndev = dst->dev; if (is_vlan_dev(ndev)) { ini->vlan_id = vlan_dev_vlan_id(ndev); goto out_rel; } priv.data = (void *)&ini->vlan_id; rtnl_lock(); netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv); rtnl_unlock(); out_rel: dst_release(dst); out: return rc; } static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version, u8 peer_systemid[], u8 peer_gid[], u8 peer_mac_v1[], enum smc_lgr_role role, u32 clcqpn, struct net *net) { struct smc_link *lnk; int i; if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) || lgr->role != role) return false; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { lnk = &lgr->lnk[i]; if (!smc_link_active(lnk)) continue; /* use verbs API to check netns, instead of lgr->net */ if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net)) return false; if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) && !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) && (smcr_version == SMC_V2 || !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN))) return true; } return false; } static bool smcd_lgr_match(struct smc_link_group *lgr, struct smcd_dev *smcismdev, u64 peer_gid) { return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev; } /* create a new SMC connection (and a new link group if necessary) */ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) { struct smc_connection *conn = &smc->conn; struct net *net = sock_net(&smc->sk); struct list_head *lgr_list; struct smc_link_group *lgr; enum smc_lgr_role role; spinlock_t *lgr_lock; int rc = 0; lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list : &smc_lgr_list.list; lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock : &smc_lgr_list.lock; ini->first_contact_local = 1; role = smc->listen_smc ? SMC_SERV : SMC_CLNT; if (role == SMC_CLNT && ini->first_contact_peer) /* create new link group as well */ goto create; /* determine if an existing link group can be reused */ spin_lock_bh(lgr_lock); list_for_each_entry(lgr, lgr_list, list) { write_lock_bh(&lgr->conns_lock); if ((ini->is_smcd ? smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected], ini->ism_peer_gid[ini->ism_selected]) : smcr_lgr_match(lgr, ini->smcr_version, ini->peer_systemid, ini->peer_gid, ini->peer_mac, role, ini->ib_clcqpn, net)) && !lgr->sync_err && (ini->smcd_version == SMC_V2 || lgr->vlan_id == ini->vlan_id) && (role == SMC_CLNT || ini->is_smcd || (lgr->conns_num < lgr->max_conns && !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) { /* link group found */ ini->first_contact_local = 0; conn->lgr = lgr; rc = smc_lgr_register_conn(conn, false); write_unlock_bh(&lgr->conns_lock); if (!rc && delayed_work_pending(&lgr->free_work)) cancel_delayed_work(&lgr->free_work); break; } write_unlock_bh(&lgr->conns_lock); } spin_unlock_bh(lgr_lock); if (rc) return rc; if (role == SMC_CLNT && !ini->first_contact_peer && ini->first_contact_local) { /* Server reuses a link group, but Client wants to start * a new one * send out_of_sync decline, reason synchr. 
error */ return SMC_CLC_DECL_SYNCERR; } create: if (ini->first_contact_local) { rc = smc_lgr_create(smc, ini); if (rc) goto out; lgr = conn->lgr; write_lock_bh(&lgr->conns_lock); rc = smc_lgr_register_conn(conn, true); write_unlock_bh(&lgr->conns_lock); if (rc) { smc_lgr_cleanup_early(lgr); goto out; } } smc_lgr_hold(conn->lgr); /* lgr_put in smc_conn_free() */ if (!conn->lgr->is_smcd) smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */ conn->freed = 0; conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; conn->urg_state = SMC_URG_READ; init_waitqueue_head(&conn->cdc_pend_tx_wq); INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work); if (ini->is_smcd) { conn->rx_off = sizeof(struct smcd_cdc_msg); smcd_cdc_rx_init(conn); /* init tasklet for this conn */ } else { conn->rx_off = 0; } #ifndef KERNEL_HAS_ATOMIC64 spin_lock_init(&conn->acurs_lock); #endif out: return rc; } #define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ #define SMCR_RMBE_SIZES 5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */ /* convert the RMB size into the compressed notation (minimum 16K, see * SMCD/R_DMBE_SIZES. * In contrast to plain ilog2, this rounds towards the next power of 2, * so the socket application gets at least its desired sndbuf / rcvbuf size. */ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) { const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE; u8 compressed; if (size <= SMC_BUF_MIN_SIZE) return 0; size = (size - 1) >> 14; /* convert to 16K multiple */ compressed = min_t(u8, ilog2(size) + 1, is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES); if (!is_smcd && is_rmb) /* RMBs are backed by & limited to max size of scatterlists */ compressed = min_t(u8, compressed, ilog2(max_scat >> 14)); return compressed; } /* convert the RMB size from compressed notation into integer */ int smc_uncompress_bufsize(u8 compressed) { u32 size; size = 0x00000001 << (((int)compressed) + 14); return (int)size; } /* try to reuse a sndbuf or rmb description slot for a certain * buffer size; if not available, return NULL */ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize, struct rw_semaphore *lock, struct list_head *buf_list) { struct smc_buf_desc *buf_slot; down_read(lock); list_for_each_entry(buf_slot, buf_list, list) { if (cmpxchg(&buf_slot->used, 0, 1) == 0) { up_read(lock); return buf_slot; } } up_read(lock); return NULL; } /* one of the conditions for announcing a receiver's current window size is * that it "results in a minimum increase in the window size of 10% of the * receive buffer space" [RFC7609] */ static inline int smc_rmb_wnd_update_limit(int rmbe_size) { return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } /* map an buf to a link */ static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb, struct smc_link *lnk) { int rc, i, nents, offset, buf_size, size, access_flags; struct scatterlist *sg; void *buf; if (buf_desc->is_map_ib[lnk->link_idx]) return 0; if (buf_desc->is_vm) { buf = buf_desc->cpu_addr; buf_size = buf_desc->len; offset = offset_in_page(buf_desc->cpu_addr); nents = PAGE_ALIGN(buf_size + offset) / PAGE_SIZE; } else { nents = 1; } rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL); if (rc) return rc; if (buf_desc->is_vm) { /* virtually contiguous buffer */ for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) { size = min_t(int, PAGE_SIZE - offset, buf_size); sg_set_page(sg, vmalloc_to_page(buf), size, offset); buf += size / sizeof(*buf); buf_size -= size; 
offset = 0; } } else { /* physically contiguous buffer */ sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl, buf_desc->cpu_addr, buf_desc->len); } /* map sg table to DMA address */ rc = smc_ib_buf_map_sg(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); /* SMC protocol depends on mapping to one DMA address only */ if (rc != nents) { rc = -EAGAIN; goto free_table; } buf_desc->is_dma_need_sync |= smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx; if (is_rmb || buf_desc->is_vm) { /* create a new memory region for the RMB or vzalloced sndbuf */ access_flags = is_rmb ? IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : IB_ACCESS_LOCAL_WRITE; rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags, buf_desc, lnk->link_idx); if (rc) goto buf_unmap; smc_ib_sync_sg_for_device(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); } buf_desc->is_map_ib[lnk->link_idx] = true; return 0; buf_unmap: smc_ib_buf_unmap_sg(lnk, buf_desc, is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); free_table: sg_free_table(&buf_desc->sgt[lnk->link_idx]); return rc; } /* register a new buf on IB device, rmb or vzalloced sndbuf * must be called under lgr->llc_conf_mutex lock */ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc) { if (list_empty(&link->lgr->list)) return -ENOLINK; if (!buf_desc->is_reg_mr[link->link_idx]) { /* register memory region for new buf */ if (buf_desc->is_vm) buf_desc->mr[link->link_idx]->iova = (uintptr_t)buf_desc->cpu_addr; if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) { buf_desc->is_reg_err = true; return -EFAULT; } buf_desc->is_reg_mr[link->link_idx] = true; } return 0; } static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock, struct list_head *lst, bool is_rmb) { struct smc_buf_desc *buf_desc, *bf; int rc = 0; down_write(lock); list_for_each_entry_safe(buf_desc, bf, lst, list) { if (!buf_desc->used) continue; rc = smcr_buf_map_link(buf_desc, is_rmb, lnk); if (rc) goto out; } out: up_write(lock); return rc; } /* map all used buffers of lgr for a new link */ int smcr_buf_map_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; int i, rc = 0; for (i = 0; i < SMC_RMBE_SIZES; i++) { rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock, &lgr->rmbs[i], true); if (rc) return rc; rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock, &lgr->sndbufs[i], false); if (rc) return rc; } return 0; } /* register all used buffers of lgr for a new link, * must be called under lgr->llc_conf_mutex lock */ int smcr_buf_reg_lgr(struct smc_link *lnk) { struct smc_link_group *lgr = lnk->lgr; struct smc_buf_desc *buf_desc, *bf; int i, rc = 0; /* reg all RMBs for a new link */ down_write(&lgr->rmbs_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) { if (!buf_desc->used) continue; rc = smcr_link_reg_buf(lnk, buf_desc); if (rc) { up_write(&lgr->rmbs_lock); return rc; } } } up_write(&lgr->rmbs_lock); if (lgr->buf_type == SMCR_PHYS_CONT_BUFS) return rc; /* reg all vzalloced sndbufs for a new link */ down_write(&lgr->sndbufs_lock); for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) { if (!buf_desc->used || !buf_desc->is_vm) continue; rc = smcr_link_reg_buf(lnk, buf_desc); if (rc) { up_write(&lgr->sndbufs_lock); return rc; } } } up_write(&lgr->sndbufs_lock); return rc; } static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, bool is_rmb, int bufsize) { struct smc_buf_desc *buf_desc; /* try to alloc a new buffer */ buf_desc = 
kzalloc(sizeof(*buf_desc), GFP_KERNEL); if (!buf_desc) return ERR_PTR(-ENOMEM); switch (lgr->buf_type) { case SMCR_PHYS_CONT_BUFS: case SMCR_MIXED_BUFS: buf_desc->order = get_order(bufsize); buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY | __GFP_ZERO, buf_desc->order); if (buf_desc->pages) { buf_desc->cpu_addr = (void *)page_address(buf_desc->pages); buf_desc->len = bufsize; buf_desc->is_vm = false; break; } if (lgr->buf_type == SMCR_PHYS_CONT_BUFS) goto out; fallthrough; // try virtually contiguous buf case SMCR_VIRT_CONT_BUFS: buf_desc->order = get_order(bufsize); buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); if (!buf_desc->cpu_addr) goto out; buf_desc->pages = NULL; buf_desc->len = bufsize; buf_desc->is_vm = true; break; } return buf_desc; out: kfree(buf_desc); return ERR_PTR(-EAGAIN); } /* map buf_desc on all usable links, * unused buffers stay mapped as long as the link is up */ static int smcr_buf_map_usable_links(struct smc_link_group *lgr, struct smc_buf_desc *buf_desc, bool is_rmb) { int i, rc = 0, cnt = 0; /* protect against parallel link reconfiguration */ down_read(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &lgr->lnk[i]; if (!smc_link_usable(lnk)) continue; if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) { rc = -ENOMEM; goto out; } cnt++; } out: up_read(&lgr->llc_conf_mutex); if (!rc && !cnt) rc = -EINVAL; return rc; } static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, bool is_dmb, int bufsize) { struct smc_buf_desc *buf_desc; int rc; /* try to alloc a new DMB */ buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); if (!buf_desc) return ERR_PTR(-ENOMEM); if (is_dmb) { rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); if (rc) { kfree(buf_desc); if (rc == -ENOMEM) return ERR_PTR(-EAGAIN); if (rc == -ENOSPC) return ERR_PTR(-ENOSPC); return ERR_PTR(-EIO); } buf_desc->pages = virt_to_page(buf_desc->cpu_addr); /* CDC header stored in buf.
So, pretend it was smaller */ buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); } else { buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC); if (!buf_desc->cpu_addr) { kfree(buf_desc); return ERR_PTR(-EAGAIN); } buf_desc->len = bufsize; } return buf_desc; } static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) { struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; struct list_head *buf_list; int bufsize, bufsize_comp; struct rw_semaphore *lock; /* lock buffer list */ bool is_dgraded = false; if (is_rmb) /* use socket recv buffer size (w/o overhead) as start value */ bufsize = smc->sk.sk_rcvbuf / 2; else /* use socket send buffer size (w/o overhead) as start value */ bufsize = smc->sk.sk_sndbuf / 2; for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb); bufsize_comp >= 0; bufsize_comp--) { if (is_rmb) { lock = &lgr->rmbs_lock; buf_list = &lgr->rmbs[bufsize_comp]; } else { lock = &lgr->sndbufs_lock; buf_list = &lgr->sndbufs[bufsize_comp]; } bufsize = smc_uncompress_bufsize(bufsize_comp); /* check for reusable slot in the link group */ buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list); if (buf_desc) { buf_desc->is_dma_need_sync = 0; SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize); SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb); break; /* found reusable slot */ } if (is_smcd) buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); else buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize); if (PTR_ERR(buf_desc) == -ENOMEM) break; if (IS_ERR(buf_desc)) { if (!is_dgraded) { is_dgraded = true; SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb); } continue; } SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb); SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize); buf_desc->used = 1; down_write(lock); list_add(&buf_desc->list, buf_list); up_write(lock); break; /* found */ } if (IS_ERR(buf_desc)) return PTR_ERR(buf_desc); if (!is_smcd) { if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) { smcr_buf_unuse(buf_desc, is_rmb, lgr); return -ENOMEM; } } if (is_rmb) { conn->rmb_desc = buf_desc; conn->rmbe_size_comp = bufsize_comp; smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(buf_desc->len); if (is_smcd) smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */ } else { conn->sndbuf_desc = buf_desc; smc->sk.sk_sndbuf = bufsize * 2; atomic_set(&conn->sndbuf_space, bufsize); } return 0; } void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { if (!conn->sndbuf_desc->is_dma_need_sync) return; if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) { int i; if (!conn->rmb_desc->is_dma_need_sync) return; if (!smc_conn_lgr_valid(conn) || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); } } /* create the send and receive buffer for an SMC socket; * receive buffers are called RMBs; * (even though the SMC protocol allows more than one RMB-element per RMB, * the Linux implementation uses just one RMB-element per RMB, i.e. 
uses an * extra RMB for every connection in a link group */ int smc_buf_create(struct smc_sock *smc, bool is_smcd) { int rc; /* create send buffer */ rc = __smc_buf_create(smc, is_smcd, false); if (rc) return rc; /* create rmb */ rc = __smc_buf_create(smc, is_smcd, true); if (rc) { down_write(&smc->conn.lgr->sndbufs_lock); list_del(&smc->conn.sndbuf_desc->list); up_write(&smc->conn.lgr->sndbufs_lock); smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); smc->conn.sndbuf_desc = NULL; } return rc; } static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr) { int i; for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) { if (!test_and_set_bit(i, lgr->rtokens_used_mask)) return i; } return -ENOSPC; } static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx, u32 rkey) { int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (test_bit(i, lgr->rtokens_used_mask) && lgr->rtokens[i][lnk_idx].rkey == rkey) return i; } return -ENOENT; } /* set rtoken for a new link to an existing rmb */ void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new, __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey) { int rtok_idx; rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known)); if (rtok_idx == -ENOENT) return; lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey); lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr); } /* set rtoken for a new link whose link_id is given */ void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id, __be64 nw_vaddr, __be32 nw_rkey) { u64 dma_addr = be64_to_cpu(nw_vaddr); u32 rkey = ntohl(nw_rkey); bool found = false; int link_idx; for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) { if (lgr->lnk[link_idx].link_id == link_id) { found = true; break; } } if (!found) return; lgr->rtokens[rtok_idx][link_idx].rkey = rkey; lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr; } /* add a new rtoken from peer */ int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey) { struct smc_link_group *lgr = smc_get_lgr(lnk); u64 dma_addr = be64_to_cpu(nw_vaddr); u32 rkey = ntohl(nw_rkey); int i; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (lgr->rtokens[i][lnk->link_idx].rkey == rkey && lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr && test_bit(i, lgr->rtokens_used_mask)) { /* already in list */ return i; } } i = smc_rmb_reserve_rtoken_idx(lgr); if (i < 0) return i; lgr->rtokens[i][lnk->link_idx].rkey = rkey; lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr; return i; } /* delete an rtoken from all links */ int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey) { struct smc_link_group *lgr = smc_get_lgr(lnk); u32 rkey = ntohl(nw_rkey); int i, j; for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { if (lgr->rtokens[i][lnk->link_idx].rkey == rkey && test_bit(i, lgr->rtokens_used_mask)) { for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) { lgr->rtokens[i][j].rkey = 0; lgr->rtokens[i][j].dma_addr = 0; } clear_bit(i, lgr->rtokens_used_mask); return 0; } } return -ENOENT; } /* save rkey and dma_addr received from peer during clc handshake */ int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *lnk, struct smc_clc_msg_accept_confirm *clc) { conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr, clc->r0.rmb_rkey); if (conn->rtoken_idx < 0) return conn->rtoken_idx; return 0; } static void smc_core_going_away(void) { struct smc_ib_device *smcibdev; struct smcd_dev *smcd; mutex_lock(&smc_ib_devices.mutex); 
list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { int i; for (i = 0; i < SMC_MAX_PORTS; i++) set_bit(i, smcibdev->ports_going_away); } mutex_unlock(&smc_ib_devices.mutex); mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { smcd->going_away = 1; } mutex_unlock(&smcd_dev_list.mutex); } /* Clean up all SMC link groups */ static void smc_lgrs_shutdown(void) { struct smcd_dev *smcd; smc_core_going_away(); smc_smcr_terminate_all(NULL); mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) smc_smcd_terminate_all(smcd); mutex_unlock(&smcd_dev_list.mutex); } static int smc_core_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) { smc_lgrs_shutdown(); smc_ib_unregister_client(); smc_ism_exit(); return 0; } static struct notifier_block smc_reboot_notifier = { .notifier_call = smc_core_reboot_event, }; int __init smc_core_init(void) { return register_reboot_notifier(&smc_reboot_notifier); } /* Called (from smc_exit) when module is removed */ void smc_core_exit(void) { unregister_reboot_notifier(&smc_reboot_notifier); smc_lgrs_shutdown(); }
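/*
 * Editor's note -- illustration only, not part of net/smc/smc_core.c.
 * The file above stores RMB/sndbuf sizes in a compressed notation where
 * value n means 2^(n+14) bytes (0 -> 16KB, 1 -> 32KB, ...), see
 * smc_compress_bufsize() and smc_uncompress_bufsize(). The standalone
 * sketch below mirrors that round-up behaviour in plain C so it can be
 * compiled and tried in userspace; the demo_* names are made up and the
 * protocol-specific clamps (SMCD_DMBE_SIZES, SMCR_RMBE_SIZES, the
 * scatterlist limit) are intentionally omitted.
 */
#include <stdio.h>

static unsigned char demo_compress(unsigned int size)
{
	unsigned int chunks;
	unsigned char comp = 0;

	if (size <= 16384)		/* SMC_BUF_MIN_SIZE: smallest bucket */
		return 0;
	chunks = (size - 1) >> 14;	/* number of 16KB units, rounded down */
	while ((1u << comp) <= chunks)	/* equivalent to ilog2(chunks) + 1 */
		comp++;
	return comp;
}

static unsigned int demo_uncompress(unsigned char comp)
{
	return 1u << (comp + 14);	/* 0 -> 16KB, 1 -> 32KB, ... */
}

int main(void)
{
	/* a request for 100000 bytes is rounded up to 131072 (128KB) */
	unsigned int req = 100000;
	unsigned char comp = demo_compress(req);

	printf("%u -> comp %u -> %u bytes\n", req, (unsigned int)comp,
	       demo_uncompress(comp));
	return 0;
}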
linux-master
net/smc/smc_core.c
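/*
 * Editor's note -- illustration only, not part of the kernel sources.
 * smc_switch_cursor() and smc_write_space() in net/smc/smc_core.c above
 * reason about producer/consumer positions in a ring buffer. A much
 * simplified model of the wrap-around distance they rely on is sketched
 * below; the real union smc_host_cursor additionally carries an explicit
 * wrap counter instead of comparing raw offsets, so treat this purely as
 * an aid for reading the cursor arithmetic. The demo_* names are made up.
 */
#include <assert.h>

static unsigned int demo_curs_diff(unsigned int size, unsigned int old_off,
				   unsigned int new_off)
{
	/* bytes travelled from old_off to new_off, modulo the ring size */
	return (new_off >= old_off) ? new_off - old_off
				    : size - old_off + new_off;
}

int main(void)
{
	assert(demo_curs_diff(4096, 100, 300) == 200);	/* no wrap */
	assert(demo_curs_diff(4096, 4000, 50) == 146);	/* wrapped around */
	return 0;
}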
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Link Layer Control (LLC) * * Copyright IBM Corp. 2016 * * Author(s): Klaus Wacker <[email protected]> * Ursula Braun <[email protected]> */ #include <net/tcp.h> #include <rdma/ib_verbs.h> #include "smc.h" #include "smc_core.h" #include "smc_clc.h" #include "smc_llc.h" #include "smc_pnet.h" #define SMC_LLC_DATA_LEN 40 struct smc_llc_hdr { struct smc_wr_rx_hdr common; union { struct { u8 length; /* 44 */ #if defined(__BIG_ENDIAN_BITFIELD) u8 reserved:4, add_link_rej_rsn:4; #elif defined(__LITTLE_ENDIAN_BITFIELD) u8 add_link_rej_rsn:4, reserved:4; #endif }; u16 length_v2; /* 44 - 8192*/ }; u8 flags; } __packed; /* format defined in * IBM Shared Memory Communications Version 2 * (https://www.ibm.com/support/pages/node/6326337) */ #define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03 struct smc_llc_msg_confirm_link { /* type 0x01 */ struct smc_llc_hdr hd; u8 sender_mac[ETH_ALEN]; u8 sender_gid[SMC_GID_SIZE]; u8 sender_qp_num[3]; u8 link_num; u8 link_uid[SMC_LGR_ID_SIZE]; u8 max_links; u8 max_conns; u8 reserved[8]; }; #define SMC_LLC_FLAG_ADD_LNK_REJ 0x40 #define SMC_LLC_REJ_RSN_NO_ALT_PATH 1 struct smc_llc_msg_add_link { /* type 0x02 */ struct smc_llc_hdr hd; u8 sender_mac[ETH_ALEN]; u8 reserved2[2]; u8 sender_gid[SMC_GID_SIZE]; u8 sender_qp_num[3]; u8 link_num; #if defined(__BIG_ENDIAN_BITFIELD) u8 reserved3 : 4, qp_mtu : 4; #elif defined(__LITTLE_ENDIAN_BITFIELD) u8 qp_mtu : 4, reserved3 : 4; #endif u8 initial_psn[3]; u8 reserved[8]; }; struct smc_llc_msg_add_link_cont_rt { __be32 rmb_key; __be32 rmb_key_new; __be64 rmb_vaddr_new; }; struct smc_llc_msg_add_link_v2_ext { #if defined(__BIG_ENDIAN_BITFIELD) u8 v2_direct : 1, reserved : 7; #elif defined(__LITTLE_ENDIAN_BITFIELD) u8 reserved : 7, v2_direct : 1; #endif u8 reserved2; u8 client_target_gid[SMC_GID_SIZE]; u8 reserved3[8]; u16 num_rkeys; struct smc_llc_msg_add_link_cont_rt rt[]; } __packed; /* format defined in * IBM Shared Memory Communications Version 2 * (https://www.ibm.com/support/pages/node/6326337) */ struct smc_llc_msg_req_add_link_v2 { struct smc_llc_hdr hd; u8 reserved[20]; u8 gid_cnt; u8 reserved2[3]; u8 gid[][SMC_GID_SIZE]; }; #define SMC_LLC_RKEYS_PER_CONT_MSG 2 struct smc_llc_msg_add_link_cont { /* type 0x03 */ struct smc_llc_hdr hd; u8 link_num; u8 num_rkeys; u8 reserved2[2]; struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG]; u8 reserved[4]; } __packed; /* format defined in RFC7609 */ #define SMC_LLC_FLAG_DEL_LINK_ALL 0x40 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20 struct smc_llc_msg_del_link { /* type 0x04 */ struct smc_llc_hdr hd; u8 link_num; __be32 reason; u8 reserved[35]; } __packed; /* format defined in RFC7609 */ struct smc_llc_msg_test_link { /* type 0x07 */ struct smc_llc_hdr hd; u8 user_data[16]; u8 reserved[24]; }; struct smc_rmb_rtoken { union { u8 num_rkeys; /* first rtoken byte of CONFIRM LINK msg */ /* is actually the num of rtokens, first */ /* rtoken is always for the current link */ u8 link_id; /* link id of the rtoken */ }; __be32 rmb_key; __be64 rmb_vaddr; } __packed; /* format defined in RFC7609 */ #define SMC_LLC_RKEYS_PER_MSG 3 #define SMC_LLC_RKEYS_PER_MSG_V2 255 struct smc_llc_msg_confirm_rkey { /* type 0x06 */ struct smc_llc_hdr hd; struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG]; u8 reserved; }; #define SMC_LLC_DEL_RKEY_MAX 8 #define SMC_LLC_FLAG_RKEY_RETRY 0x10 #define SMC_LLC_FLAG_RKEY_NEG 0x20 struct smc_llc_msg_delete_rkey { /* type 0x09 */ struct smc_llc_hdr hd; u8 num_rkeys; u8 
err_mask; u8 reserved[2]; __be32 rkey[8]; u8 reserved2[4]; }; struct smc_llc_msg_delete_rkey_v2 { /* type 0x29 */ struct smc_llc_hdr hd; u8 num_rkeys; u8 num_inval_rkeys; u8 reserved[2]; __be32 rkey[]; }; union smc_llc_msg { struct smc_llc_msg_confirm_link confirm_link; struct smc_llc_msg_add_link add_link; struct smc_llc_msg_req_add_link_v2 req_add_link; struct smc_llc_msg_add_link_cont add_link_cont; struct smc_llc_msg_del_link delete_link; struct smc_llc_msg_confirm_rkey confirm_rkey; struct smc_llc_msg_delete_rkey delete_rkey; struct smc_llc_msg_test_link test_link; struct { struct smc_llc_hdr hdr; u8 data[SMC_LLC_DATA_LEN]; } raw; }; #define SMC_LLC_FLAG_RESP 0x80 struct smc_llc_qentry { struct list_head list; struct smc_link *link; union smc_llc_msg msg; }; static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc); struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow) { struct smc_llc_qentry *qentry = flow->qentry; flow->qentry = NULL; return qentry; } void smc_llc_flow_qentry_del(struct smc_llc_flow *flow) { struct smc_llc_qentry *qentry; if (flow->qentry) { qentry = flow->qentry; flow->qentry = NULL; kfree(qentry); } } static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow, struct smc_llc_qentry *qentry) { flow->qentry = qentry; } static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type, struct smc_llc_qentry *qentry) { u8 msg_type = qentry->msg.raw.hdr.common.llc_type; if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) && flow_type != msg_type && !lgr->delayed_event) { lgr->delayed_event = qentry; return; } /* drop parallel or already-in-progress llc requests */ if (flow_type != msg_type) pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel " "LLC msg: msg %d flow %d role %d\n", SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie, qentry->msg.raw.hdr.common.type, flow_type, lgr->role); kfree(qentry); } /* try to start a new llc flow, initiated by an incoming llc msg */ static bool smc_llc_flow_start(struct smc_llc_flow *flow, struct smc_llc_qentry *qentry) { struct smc_link_group *lgr = qentry->link->lgr; spin_lock_bh(&lgr->llc_flow_lock); if (flow->type) { /* a flow is already active */ smc_llc_flow_parallel(lgr, flow->type, qentry); spin_unlock_bh(&lgr->llc_flow_lock); return false; } switch (qentry->msg.raw.hdr.common.llc_type) { case SMC_LLC_ADD_LINK: flow->type = SMC_LLC_FLOW_ADD_LINK; break; case SMC_LLC_DELETE_LINK: flow->type = SMC_LLC_FLOW_DEL_LINK; break; case SMC_LLC_CONFIRM_RKEY: case SMC_LLC_DELETE_RKEY: flow->type = SMC_LLC_FLOW_RKEY; break; default: flow->type = SMC_LLC_FLOW_NONE; } smc_llc_flow_qentry_set(flow, qentry); spin_unlock_bh(&lgr->llc_flow_lock); return true; } /* start a new local llc flow, wait till current flow finished */ int smc_llc_flow_initiate(struct smc_link_group *lgr, enum smc_llc_flowtype type) { enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE; int rc; /* all flows except confirm_rkey and delete_rkey are exclusive, * confirm/delete rkey flows can run concurrently (local and remote) */ if (type == SMC_LLC_FLOW_RKEY) allowed_remote = SMC_LLC_FLOW_RKEY; again: if (list_empty(&lgr->list)) return -ENODEV; spin_lock_bh(&lgr->llc_flow_lock); if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || lgr->llc_flow_rmt.type == allowed_remote)) { lgr->llc_flow_lcl.type = type; spin_unlock_bh(&lgr->llc_flow_lock); return 0; } spin_unlock_bh(&lgr->llc_flow_lock); rc = wait_event_timeout(lgr->llc_flow_waiter, 
(list_empty(&lgr->list) || (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || lgr->llc_flow_rmt.type == allowed_remote))), SMC_LLC_WAIT_TIME * 10); if (!rc) return -ETIMEDOUT; goto again; } /* finish the current llc flow */ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow) { spin_lock_bh(&lgr->llc_flow_lock); memset(flow, 0, sizeof(*flow)); flow->type = SMC_LLC_FLOW_NONE; spin_unlock_bh(&lgr->llc_flow_lock); if (!list_empty(&lgr->list) && lgr->delayed_event && flow == &lgr->llc_flow_lcl) schedule_work(&lgr->llc_event_work); else wake_up(&lgr->llc_flow_waiter); } /* lnk is optional and used for early wakeup when link goes down, useful in * cases where we wait for a response on the link after we sent a request */ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr, struct smc_link *lnk, int time_out, u8 exp_msg) { struct smc_llc_flow *flow = &lgr->llc_flow_lcl; u8 rcv_msg; wait_event_timeout(lgr->llc_msg_waiter, (flow->qentry || (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)), time_out); if (!flow->qentry || (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) { smc_llc_flow_qentry_del(flow); goto out; } rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type; if (exp_msg && rcv_msg != exp_msg) { if (exp_msg == SMC_LLC_ADD_LINK && rcv_msg == SMC_LLC_DELETE_LINK) { /* flow_start will delay the unexpected msg */ smc_llc_flow_start(&lgr->llc_flow_lcl, smc_llc_flow_qentry_clr(flow)); return NULL; } pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: " "msg %d exp %d flow %d role %d flags %x\n", SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie, rcv_msg, exp_msg, flow->type, lgr->role, flow->qentry->msg.raw.hdr.flags); smc_llc_flow_qentry_del(flow); } out: return flow->qentry; } /********************************** send *************************************/ struct smc_llc_tx_pend { }; /* handler for send/transmission completion of an LLC msg */ static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend, struct smc_link *link, enum ib_wc_status wc_status) { /* future work: handle wc_status error for recovery and failover */ } /** * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits * @link: Pointer to SMC link used for sending LLC control message. * @wr_buf: Out variable returning pointer to work request payload buffer. * @pend: Out variable returning pointer to private pending WR tracking. * It's the context the transmit complete handler will get. * * Reserves and pre-fills an entry for a pending work request send/tx. * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx. * Can sleep due to smc_get_ctrl_buf (if not in softirq context). * * Return: 0 on success, otherwise an error value. 
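 * Note (added comment): the BUILD_BUG_ON_MSG checks in the body verify at
 * compile time that union smc_llc_msg fits the configured send buffer sizes.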
*/ static int smc_llc_add_pending_send(struct smc_link *link, struct smc_wr_buf **wr_buf, struct smc_wr_tx_pend_priv **pend) { int rc; rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, pend); if (rc < 0) return rc; BUILD_BUG_ON_MSG( sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE, "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)"); BUILD_BUG_ON_MSG( sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE, "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); BUILD_BUG_ON_MSG( sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)"); return 0; } static int smc_llc_add_pending_send_v2(struct smc_link *link, struct smc_wr_v2_buf **wr_buf, struct smc_wr_tx_pend_priv **pend) { int rc; rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend); if (rc < 0) return rc; return 0; } static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr, struct smc_link_group *lgr, size_t len) { if (lgr->smc_version == SMC_V2) { hdr->common.llc_version = SMC_V2; hdr->length_v2 = len; } else { hdr->common.llc_version = 0; hdr->length = len; } } /* high-level API to send LLC confirm link */ int smc_llc_send_confirm_link(struct smc_link *link, enum smc_llc_reqresp reqresp) { struct smc_llc_msg_confirm_link *confllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; confllc = (struct smc_llc_msg_confirm_link *)wr_buf; memset(confllc, 0, sizeof(*confllc)); confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK; smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc)); confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; if (reqresp == SMC_LLC_RESP) confllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE); hton24(confllc->sender_qp_num, link->roce_qp->qp_num); confllc->link_num = link->link_id; memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE); confllc->max_links = SMC_LINKS_ADD_LNK_MAX; if (link->lgr->smc_version == SMC_V2 && link->lgr->peer_smc_release >= SMC_RELEASE_1) { confllc->max_conns = link->lgr->max_conns; confllc->max_links = link->lgr->max_links; } /* send llc message */ rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } /* send LLC confirm rkey request */ static int smc_llc_send_confirm_rkey(struct smc_link *send_link, struct smc_buf_desc *rmb_desc) { struct smc_llc_msg_confirm_rkey *rkeyllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; struct smc_link *link; int i, rc, rtok_ix; if (!smc_wr_tx_link_hold(send_link)) return -ENOLINK; rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend); if (rc) goto put_out; rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY; smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc)); rtok_ix = 1; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { link = &send_link->lgr->lnk[i]; if (smc_link_active(link) && link != send_link) { rkeyllc->rtoken[rtok_ix].link_id = link->link_id; rkeyllc->rtoken[rtok_ix].rmb_key = htonl(rmb_desc->mr[link->link_idx]->rkey); rkeyllc->rtoken[rtok_ix].rmb_vaddr = 
rmb_desc->is_vm ? cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) : cpu_to_be64((u64)sg_dma_address (rmb_desc->sgt[link->link_idx].sgl)); rtok_ix++; } } /* rkey of send_link is in rtoken[0] */ rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1; rkeyllc->rtoken[0].rmb_key = htonl(rmb_desc->mr[send_link->link_idx]->rkey); rkeyllc->rtoken[0].rmb_vaddr = rmb_desc->is_vm ? cpu_to_be64((uintptr_t)rmb_desc->cpu_addr) : cpu_to_be64((u64)sg_dma_address (rmb_desc->sgt[send_link->link_idx].sgl)); /* send llc message */ rc = smc_wr_tx_send(send_link, pend); put_out: smc_wr_tx_link_put(send_link); return rc; } /* send LLC delete rkey request */ static int smc_llc_send_delete_rkey(struct smc_link *link, struct smc_buf_desc *rmb_desc) { struct smc_llc_msg_delete_rkey *rkeyllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf; memset(rkeyllc, 0, sizeof(*rkeyllc)); rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY; smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc)); rkeyllc->num_rkeys = 1; rkeyllc->rkey[0] = htonl(rmb_desc->mr[link->link_idx]->rkey); /* send llc message */ rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } /* return first buffer from any of the next buf lists */ static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr, int *buf_lst) { struct smc_buf_desc *buf_pos; while (*buf_lst < SMC_RMBE_SIZES) { buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst], struct smc_buf_desc, list); if (buf_pos) return buf_pos; (*buf_lst)++; } return NULL; } /* return next rmb from buffer lists */ static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, int *buf_lst, struct smc_buf_desc *buf_pos) { struct smc_buf_desc *buf_next; if (!buf_pos) return _smc_llc_get_next_rmb(lgr, buf_lst); if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { (*buf_lst)++; return _smc_llc_get_next_rmb(lgr, buf_lst); } buf_next = list_next_entry(buf_pos, list); return buf_next; } static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr, int *buf_lst) { *buf_lst = 0; return smc_llc_get_next_rmb(lgr, buf_lst, NULL); } static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext, struct smc_link *link, struct smc_link *link_new) { struct smc_link_group *lgr = link->lgr; struct smc_buf_desc *buf_pos; int prim_lnk_idx, lnk_idx, i; struct smc_buf_desc *rmb; int len = sizeof(*ext); int buf_lst; ext->v2_direct = !lgr->uses_gateway; memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE); prim_lnk_idx = link->link_idx; lnk_idx = link_new->link_idx; down_write(&lgr->rmbs_lock); ext->num_rkeys = lgr->conns_num; if (!ext->num_rkeys) goto out; buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); for (i = 0; i < ext->num_rkeys; i++) { while (buf_pos && !(buf_pos)->used) buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); if (!buf_pos) break; rmb = buf_pos; ext->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey); ext->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey); ext->rt[i].rmb_vaddr_new = rmb->is_vm ? 
cpu_to_be64((uintptr_t)rmb->cpu_addr) : cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); } len += i * sizeof(ext->rt[0]); out: up_write(&lgr->rmbs_lock); return len; } /* send ADD LINK request or response */ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[], struct smc_link *link_new, enum smc_llc_reqresp reqresp) { struct smc_llc_msg_add_link_v2_ext *ext = NULL; struct smc_llc_msg_add_link *addllc; struct smc_wr_tx_pend_priv *pend; int len = sizeof(*addllc); int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; if (link->lgr->smc_version == SMC_V2) { struct smc_wr_v2_buf *wr_buf; rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); if (rc) goto put_out; addllc = (struct smc_llc_msg_add_link *)wr_buf; ext = (struct smc_llc_msg_add_link_v2_ext *) &wr_buf->raw[sizeof(*addllc)]; memset(ext, 0, SMC_WR_TX_SIZE); } else { struct smc_wr_buf *wr_buf; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; addllc = (struct smc_llc_msg_add_link *)wr_buf; } memset(addllc, 0, sizeof(*addllc)); addllc->hd.common.llc_type = SMC_LLC_ADD_LINK; if (reqresp == SMC_LLC_RESP) addllc->hd.flags |= SMC_LLC_FLAG_RESP; memcpy(addllc->sender_mac, mac, ETH_ALEN); memcpy(addllc->sender_gid, gid, SMC_GID_SIZE); if (link_new) { addllc->link_num = link_new->link_id; hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num); hton24(addllc->initial_psn, link_new->psn_initial); if (reqresp == SMC_LLC_REQ) addllc->qp_mtu = link_new->path_mtu; else addllc->qp_mtu = min(link_new->path_mtu, link_new->peer_mtu); } if (ext && link_new) len += smc_llc_fill_ext_v2(ext, link, link_new); smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len); /* send llc message */ if (link->lgr->smc_version == SMC_V2) rc = smc_wr_tx_v2_send(link, pend, len); else rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } /* send DELETE LINK request or response */ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id, enum smc_llc_reqresp reqresp, bool orderly, u32 reason) { struct smc_llc_msg_del_link *delllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; delllc = (struct smc_llc_msg_del_link *)wr_buf; memset(delllc, 0, sizeof(*delllc)); delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK; smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc)); if (reqresp == SMC_LLC_RESP) delllc->hd.flags |= SMC_LLC_FLAG_RESP; if (orderly) delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; if (link_del_id) delllc->link_num = link_del_id; else delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; delllc->reason = htonl(reason); /* send llc message */ rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } /* send LLC test link request */ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16]) { struct smc_llc_msg_test_link *testllc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; testllc = (struct smc_llc_msg_test_link *)wr_buf; memset(testllc, 0, sizeof(*testllc)); testllc->hd.common.llc_type = SMC_LLC_TEST_LINK; smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc)); memcpy(testllc->user_data, user_data, sizeof(testllc->user_data)); /* send llc message */ rc = smc_wr_tx_send(link, pend); 
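/* drop the reference taken by smc_wr_tx_link_hold() at function entry */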
put_out: smc_wr_tx_link_put(link); return rc; } /* schedule an llc send on link, may wait for buffers */ static int smc_llc_send_message(struct smc_link *link, void *llcbuf) { struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg)); rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } /* schedule an llc send on link, may wait for buffers, * and wait for send completion notification. * @return 0 on success */ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf) { struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; int rc; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg)); rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME); put_out: smc_wr_tx_link_put(link); return rc; } /********************************* receive ***********************************/ static int smc_llc_alloc_alt_link(struct smc_link_group *lgr, enum smc_lgr_type lgr_new_t) { int i; if (lgr->type == SMC_LGR_SYMMETRIC || (lgr->type != SMC_LGR_SINGLE && (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || lgr_new_t == SMC_LGR_ASYMMETRIC_PEER))) return -EMLINK; if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) { for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--) if (lgr->lnk[i].state == SMC_LNK_UNUSED) return i; } else { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) if (lgr->lnk[i].state == SMC_LNK_UNUSED) return i; } return -EMLINK; } /* send one add_link_continue msg */ static int smc_llc_add_link_cont(struct smc_link *link, struct smc_link *link_new, u8 *num_rkeys_todo, int *buf_lst, struct smc_buf_desc **buf_pos) { struct smc_llc_msg_add_link_cont *addc_llc; struct smc_link_group *lgr = link->lgr; int prim_lnk_idx, lnk_idx, i, rc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_buf *wr_buf; struct smc_buf_desc *rmb; u8 n; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_llc_add_pending_send(link, &wr_buf, &pend); if (rc) goto put_out; addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf; memset(addc_llc, 0, sizeof(*addc_llc)); prim_lnk_idx = link->link_idx; lnk_idx = link_new->link_idx; addc_llc->link_num = link_new->link_id; addc_llc->num_rkeys = *num_rkeys_todo; n = *num_rkeys_todo; for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) { while (*buf_pos && !(*buf_pos)->used) *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); if (!*buf_pos) { addc_llc->num_rkeys = addc_llc->num_rkeys - *num_rkeys_todo; *num_rkeys_todo = 0; break; } rmb = *buf_pos; addc_llc->rt[i].rmb_key = htonl(rmb->mr[prim_lnk_idx]->rkey); addc_llc->rt[i].rmb_key_new = htonl(rmb->mr[lnk_idx]->rkey); addc_llc->rt[i].rmb_vaddr_new = rmb->is_vm ? 
cpu_to_be64((uintptr_t)rmb->cpu_addr) : cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); (*num_rkeys_todo)--; *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); } addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); if (lgr->role == SMC_CLNT) addc_llc->hd.flags |= SMC_LLC_FLAG_RESP; rc = smc_wr_tx_send(link, pend); put_out: smc_wr_tx_link_put(link); return rc; } static int smc_llc_cli_rkey_exchange(struct smc_link *link, struct smc_link *link_new) { struct smc_llc_msg_add_link_cont *addc_llc; struct smc_link_group *lgr = link->lgr; u8 max, num_rkeys_send, num_rkeys_recv; struct smc_llc_qentry *qentry; struct smc_buf_desc *buf_pos; int buf_lst; int rc = 0; int i; down_write(&lgr->rmbs_lock); num_rkeys_send = lgr->conns_num; buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); do { qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK_CONT); if (!qentry) { rc = -ETIMEDOUT; break; } addc_llc = &qentry->msg.add_link_cont; num_rkeys_recv = addc_llc->num_rkeys; max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG); for (i = 0; i < max; i++) { smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, addc_llc->rt[i].rmb_key, addc_llc->rt[i].rmb_vaddr_new, addc_llc->rt[i].rmb_key_new); num_rkeys_recv--; } smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send, &buf_lst, &buf_pos); if (rc) break; } while (num_rkeys_send || num_rkeys_recv); up_write(&lgr->rmbs_lock); return rc; } /* prepare and send an add link reject response */ static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry) { qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ; qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, sizeof(qentry->msg)); return smc_llc_send_message(qentry->link, &qentry->msg); } static int smc_llc_cli_conf_link(struct smc_link *link, struct smc_init_info *ini, struct smc_link *link_new, enum smc_lgr_type lgr_new_t) { struct smc_link_group *lgr = link->lgr; struct smc_llc_qentry *qentry = NULL; int rc = 0; /* receive CONFIRM LINK request over RoCE fabric */ qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0); if (!qentry) { rc = smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* received DELETE_LINK instead */ qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, &qentry->msg); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return -ENOLINK; } smc_llc_save_peer_uid(qentry); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); rc = smc_ib_modify_qp_rts(link_new); if (rc) { smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } smc_wr_remember_qp_attr(link_new); rc = smcr_buf_reg_lgr(link_new); if (rc) { smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } /* send CONFIRM LINK response over RoCE fabric */ rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP); if (rc) { smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); return -ENOLINK; } smc_llc_link_active(link_new); if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx); else 
smcr_lgr_set_type(lgr, lgr_new_t); return 0; } static void smc_llc_save_add_link_rkeys(struct smc_link *link, struct smc_link *link_new) { struct smc_llc_msg_add_link_v2_ext *ext; struct smc_link_group *lgr = link->lgr; int max, i; ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 + SMC_WR_TX_SIZE); max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); down_write(&lgr->rmbs_lock); for (i = 0; i < max; i++) { smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, ext->rt[i].rmb_key, ext->rt[i].rmb_vaddr_new, ext->rt[i].rmb_key_new); } up_write(&lgr->rmbs_lock); } static void smc_llc_save_add_link_info(struct smc_link *link, struct smc_llc_msg_add_link *add_llc) { link->peer_qpn = ntoh24(add_llc->sender_qp_num); memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE); memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN); link->peer_psn = ntoh24(add_llc->initial_psn); link->peer_mtu = add_llc->qp_mtu; } /* as an SMC client, process an add link request */ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) { struct smc_llc_msg_add_link *llc = &qentry->msg.add_link; enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = smc_get_lgr(link); struct smc_init_info *ini = NULL; struct smc_link *lnk_new = NULL; int lnk_idx, rc = 0; if (!llc->qp_mtu) goto out_reject; ini = kzalloc(sizeof(*ini), GFP_KERNEL); if (!ini) { rc = -ENOMEM; goto out_reject; } if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) { rc = 0; goto out_reject; } ini->vlan_id = lgr->vlan_id; if (lgr->smc_version == SMC_V2) { ini->check_smcrv2 = true; ini->smcrv2.saddr = lgr->saddr; ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid); } smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && (lgr->smc_version == SMC_V2 || !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) { if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2) goto out_reject; lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; ini->smcrv2.ib_dev_v2 = link->smcibdev; ini->smcrv2.ib_port_v2 = link->ibport; } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; ini->ib_dev = link->smcibdev; ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); if (lnk_idx < 0) goto out_reject; lnk_new = &lgr->lnk[lnk_idx]; rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini); if (rc) goto out_reject; smc_llc_save_add_link_info(lnk_new, llc); lnk_new->link_id = llc->link_num; /* SMC server assigns link id */ smc_llc_link_set_uid(lnk_new); rc = smc_ib_ready_link(lnk_new); if (rc) goto out_clear_lnk; rc = smcr_buf_map_lgr(lnk_new); if (rc) goto out_clear_lnk; rc = smc_llc_send_add_link(link, lnk_new->smcibdev->mac[lnk_new->ibport - 1], lnk_new->gid, lnk_new, SMC_LLC_RESP); if (rc) goto out_clear_lnk; if (lgr->smc_version == SMC_V2) { smc_llc_save_add_link_rkeys(link, lnk_new); } else { rc = smc_llc_cli_rkey_exchange(link, lnk_new); if (rc) { rc = 0; goto out_clear_lnk; } } rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t); if (!rc) goto out; out_clear_lnk: lnk_new->state = SMC_LNK_INACTIVE; smcr_link_clear(lnk_new, false); out_reject: smc_llc_cli_add_link_reject(qentry); out: kfree(ini); kfree(qentry); return rc; } static void smc_llc_send_request_add_link(struct smc_link *link) { struct smc_llc_msg_req_add_link_v2 *llc; struct smc_wr_tx_pend_priv *pend; struct smc_wr_v2_buf *wr_buf; struct smc_gidlist 
gidlist; int rc, len, i; if (!smc_wr_tx_link_hold(link)) return; if (link->lgr->type == SMC_LGR_SYMMETRIC || link->lgr->type == SMC_LGR_ASYMMETRIC_PEER) goto put_out; smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid); if (gidlist.len <= 1) goto put_out; rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend); if (rc) goto put_out; llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf; memset(llc, 0, SMC_WR_TX_SIZE); llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK; for (i = 0; i < gidlist.len; i++) memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0])); llc->gid_cnt = gidlist.len; len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0])); smc_llc_init_msg_hdr(&llc->hd, link->lgr, len); rc = smc_wr_tx_v2_send(link, pend, len); if (!rc) /* set REQ_ADD_LINK flow and wait for response from peer */ link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK; put_out: smc_wr_tx_link_put(link); } /* as an SMC client, invite server to start the add_link processing */ static void smc_llc_cli_add_link_invite(struct smc_link *link, struct smc_llc_qentry *qentry) { struct smc_link_group *lgr = smc_get_lgr(link); struct smc_init_info *ini = NULL; if (lgr->smc_version == SMC_V2) { smc_llc_send_request_add_link(link); goto out; } if (lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER) goto out; if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) goto out; ini = kzalloc(sizeof(*ini), GFP_KERNEL); if (!ini) goto out; ini->vlan_id = lgr->vlan_id; smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); if (!ini->ib_dev) goto out; smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1], ini->ib_gid, NULL, SMC_LLC_REQ); out: kfree(ini); kfree(qentry); } static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc) { int i; for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++) if (llc->raw.data[i]) return false; return true; } static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) { if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK && smc_llc_is_empty_llc_message(llc)) return true; return false; } static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) { struct smc_llc_qentry *qentry; qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); down_write(&lgr->llc_conf_mutex); if (smc_llc_is_local_add_link(&qentry->msg)) smc_llc_cli_add_link_invite(qentry->link, qentry); else smc_llc_cli_add_link(qentry->link, qentry); up_write(&lgr->llc_conf_mutex); } static int smc_llc_active_link_count(struct smc_link_group *lgr) { int i, link_count = 0; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_active(&lgr->lnk[i])) continue; link_count++; } return link_count; } /* find the asymmetric link when 3 links are established */ static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr) { int asym_idx = -ENOENT; int i, j, k; bool found; /* determine asymmetric link */ found = false; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) { if (!smc_link_usable(&lgr->lnk[i]) || !smc_link_usable(&lgr->lnk[j])) continue; if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid, SMC_GID_SIZE)) { found = true; /* asym_lnk is i or j */ break; } } if (found) break; } if (!found) goto out; /* no asymmetric link */ for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) { if (!smc_link_usable(&lgr->lnk[k])) continue; if (k != i && !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid, SMC_GID_SIZE)) { asym_idx = i; break; } if (k != j && !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid, SMC_GID_SIZE)) { asym_idx = j; break; } } out: return 
(asym_idx < 0) ? NULL : &lgr->lnk[asym_idx]; } static void smc_llc_delete_asym_link(struct smc_link_group *lgr) { struct smc_link *lnk_new = NULL, *lnk_asym; struct smc_llc_qentry *qentry; int rc; lnk_asym = smc_llc_find_asym_link(lgr); if (!lnk_asym) return; /* no asymmetric link */ if (!smc_link_downing(&lnk_asym->state)) return; lnk_new = smc_switch_conns(lgr, lnk_asym, false); smc_wr_tx_wait_no_pending_sends(lnk_asym); if (!lnk_new) goto out_free; /* change flow type from ADD_LINK into DEL_LINK */ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK; rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ, true, SMC_LLC_DEL_NO_ASYM_NEEDED); if (rc) { smcr_link_down_cond(lnk_new); goto out_free; } qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME, SMC_LLC_DELETE_LINK); if (!qentry) { smcr_link_down_cond(lnk_new); goto out_free; } smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); out_free: smcr_link_clear(lnk_asym, true); } static int smc_llc_srv_rkey_exchange(struct smc_link *link, struct smc_link *link_new) { struct smc_llc_msg_add_link_cont *addc_llc; struct smc_link_group *lgr = link->lgr; u8 max, num_rkeys_send, num_rkeys_recv; struct smc_llc_qentry *qentry = NULL; struct smc_buf_desc *buf_pos; int buf_lst; int rc = 0; int i; down_write(&lgr->rmbs_lock); num_rkeys_send = lgr->conns_num; buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); do { smc_llc_add_link_cont(link, link_new, &num_rkeys_send, &buf_lst, &buf_pos); qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK_CONT); if (!qentry) { rc = -ETIMEDOUT; goto out; } addc_llc = &qentry->msg.add_link_cont; num_rkeys_recv = addc_llc->num_rkeys; max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG); for (i = 0; i < max; i++) { smc_rtoken_set(lgr, link->link_idx, link_new->link_idx, addc_llc->rt[i].rmb_key, addc_llc->rt[i].rmb_vaddr_new, addc_llc->rt[i].rmb_key_new); num_rkeys_recv--; } smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); } while (num_rkeys_send || num_rkeys_recv); out: up_write(&lgr->rmbs_lock); return rc; } static int smc_llc_srv_conf_link(struct smc_link *link, struct smc_link *link_new, enum smc_lgr_type lgr_new_t) { struct smc_link_group *lgr = link->lgr; struct smc_llc_qentry *qentry = NULL; int rc; /* send CONFIRM LINK request over the RoCE fabric */ rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ); if (rc) return -ENOLINK; /* receive CONFIRM LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); if (!qentry || qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) { /* send DELETE LINK */ smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); if (qentry) smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return -ENOLINK; } smc_llc_save_peer_uid(qentry); smc_llc_link_active(link_new); if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL || lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx); else smcr_lgr_set_type(lgr, lgr_new_t); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return 0; } static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry) { qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP; smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr, sizeof(qentry->msg)); memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data)); smc_llc_send_message(qentry->link, &qentry->msg); } int smc_llc_srv_add_link(struct smc_link *link, struct smc_llc_qentry *req_qentry) { enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC; struct smc_link_group *lgr = 
link->lgr; struct smc_llc_msg_add_link *add_llc; struct smc_llc_qentry *qentry = NULL; bool send_req_add_link_resp = false; struct smc_link *link_new = NULL; struct smc_init_info *ini = NULL; int lnk_idx, rc = 0; if (req_qentry && req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK) send_req_add_link_resp = true; ini = kzalloc(sizeof(*ini), GFP_KERNEL); if (!ini) { rc = -ENOMEM; goto out; } if (lgr->type == SMC_LGR_SINGLE && lgr->max_links <= 1) { rc = 0; goto out; } /* ignore client add link recommendation, start new flow */ ini->vlan_id = lgr->vlan_id; if (lgr->smc_version == SMC_V2) { ini->check_smcrv2 = true; ini->smcrv2.saddr = lgr->saddr; if (send_req_add_link_resp) { struct smc_llc_msg_req_add_link_v2 *req_add = &req_qentry->msg.req_add_link; ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]); } } smc_pnet_find_alt_roce(lgr, ini, link->smcibdev); if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; ini->smcrv2.ib_dev_v2 = link->smcibdev; ini->smcrv2.ib_port_v2 = link->ibport; } else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) { lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL; ini->ib_dev = link->smcibdev; ini->ib_port = link->ibport; } lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t); if (lnk_idx < 0) { rc = 0; goto out; } rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini); if (rc) goto out; link_new = &lgr->lnk[lnk_idx]; rc = smcr_buf_map_lgr(link_new); if (rc) goto out_err; rc = smc_llc_send_add_link(link, link_new->smcibdev->mac[link_new->ibport-1], link_new->gid, link_new, SMC_LLC_REQ); if (rc) goto out_err; send_req_add_link_resp = false; /* receive ADD LINK response over the RoCE fabric */ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK); if (!qentry) { rc = -ETIMEDOUT; goto out_err; } add_llc = &qentry->msg.add_link; if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) { smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); rc = -ENOLINK; goto out_err; } if (lgr->type == SMC_LGR_SINGLE && (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) && (lgr->smc_version == SMC_V2 || !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) { lgr_new_t = SMC_LGR_ASYMMETRIC_PEER; } smc_llc_save_add_link_info(link_new, add_llc); smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); rc = smc_ib_ready_link(link_new); if (rc) goto out_err; rc = smcr_buf_reg_lgr(link_new); if (rc) goto out_err; if (lgr->smc_version == SMC_V2) { smc_llc_save_add_link_rkeys(link, link_new); } else { rc = smc_llc_srv_rkey_exchange(link, link_new); if (rc) goto out_err; } rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); if (rc) goto out_err; kfree(ini); return 0; out_err: if (link_new) { link_new->state = SMC_LNK_INACTIVE; smcr_link_clear(link_new, false); } out: kfree(ini); if (send_req_add_link_resp) smc_llc_send_req_add_link_response(req_qentry); return rc; } static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) { struct smc_link *link = lgr->llc_flow_lcl.qentry->link; struct smc_llc_qentry *qentry; int rc; qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); down_write(&lgr->llc_conf_mutex); rc = smc_llc_srv_add_link(link, qentry); if (!rc && lgr->type == SMC_LGR_SYMMETRIC) { /* delete any asymmetric link */ smc_llc_delete_asym_link(lgr); } up_write(&lgr->llc_conf_mutex); kfree(qentry); } /* enqueue a local add_link req to trigger a new add_link flow */ void smc_llc_add_link_local(struct smc_link *link) { struct smc_llc_msg_add_link add_llc = {}; add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK; 
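/* the otherwise all-zero message body lets smc_llc_is_local_add_link() recognize this as a locally generated request */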
smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc)); /* no dev and port needed */ smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); } /* worker to process an add link message */ static void smc_llc_add_link_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, llc_add_link_work); if (list_empty(&lgr->list)) { /* link group is terminating */ smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); goto out; } if (lgr->role == SMC_CLNT) smc_llc_process_cli_add_link(lgr); else smc_llc_process_srv_add_link(lgr); out: if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK) smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } /* enqueue a local del_link msg to trigger a new del_link flow, * called only for role SMC_SERV */ void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id) { struct smc_llc_msg_del_link del_llc = {}; del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK; smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc)); del_llc.link_num = del_link_id; del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH); del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc); } static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) { struct smc_link *lnk_del = NULL, *lnk_asym, *lnk; struct smc_llc_msg_del_link *del_llc; struct smc_llc_qentry *qentry; int active_links; int lnk_idx; qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); lnk = qentry->link; del_llc = &qentry->msg.delete_link; if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) { smc_lgr_terminate_sched(lgr); goto out; } down_write(&lgr->llc_conf_mutex); /* delete single link */ for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) { if (lgr->lnk[lnk_idx].link_id != del_llc->link_num) continue; lnk_del = &lgr->lnk[lnk_idx]; break; } del_llc->hd.flags |= SMC_LLC_FLAG_RESP; if (!lnk_del) { /* link was not found */ del_llc->reason = htonl(SMC_LLC_DEL_NOLNK); smc_llc_send_message(lnk, &qentry->msg); goto out_unlock; } lnk_asym = smc_llc_find_asym_link(lgr); del_llc->reason = 0; smc_llc_send_message(lnk, &qentry->msg); /* response */ if (smc_link_downing(&lnk_del->state)) smc_switch_conns(lgr, lnk_del, false); smcr_link_clear(lnk_del, true); active_links = smc_llc_active_link_count(lgr); if (lnk_del == lnk_asym) { /* expected deletion of asym link, don't change lgr state */ } else if (active_links == 1) { smcr_lgr_set_type(lgr, SMC_LGR_SINGLE); } else if (!active_links) { smcr_lgr_set_type(lgr, SMC_LGR_NONE); smc_lgr_terminate_sched(lgr); } out_unlock: up_write(&lgr->llc_conf_mutex); out: kfree(qentry); } /* try to send a DELETE LINK ALL request on any active link, * waiting for send completion */ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn) { struct smc_llc_msg_del_link delllc = {}; int i; delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK; smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc)); if (ord) delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; delllc.reason = htonl(rsn); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (!smc_link_sendable(&lgr->lnk[i])) continue; if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc)) break; } } static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) { struct smc_llc_msg_del_link *del_llc; struct smc_link *lnk, *lnk_del; struct smc_llc_qentry *qentry; int active_links; int i; down_write(&lgr->llc_conf_mutex); qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); lnk = 
qentry->link; del_llc = &qentry->msg.delete_link; if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) { /* delete entire lgr */ smc_llc_send_link_delete_all(lgr, true, ntohl( qentry->msg.delete_link.reason)); smc_lgr_terminate_sched(lgr); goto out; } /* delete single link */ lnk_del = NULL; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].link_id == del_llc->link_num) { lnk_del = &lgr->lnk[i]; break; } } if (!lnk_del) goto out; /* asymmetric link already deleted */ if (smc_link_downing(&lnk_del->state)) { if (smc_switch_conns(lgr, lnk_del, false)) smc_wr_tx_wait_no_pending_sends(lnk_del); } if (!list_empty(&lgr->list)) { /* qentry is either a request from peer (send it back to * initiate the DELETE_LINK processing), or a locally * enqueued DELETE_LINK request (forward it) */ if (!smc_llc_send_message(lnk, &qentry->msg)) { struct smc_llc_qentry *qentry2; qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME, SMC_LLC_DELETE_LINK); if (qentry2) smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); } } smcr_link_clear(lnk_del, true); active_links = smc_llc_active_link_count(lgr); if (active_links == 1) { smcr_lgr_set_type(lgr, SMC_LGR_SINGLE); } else if (!active_links) { smcr_lgr_set_type(lgr, SMC_LGR_NONE); smc_lgr_terminate_sched(lgr); } if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { /* trigger setup of asymm alt link */ smc_llc_add_link_local(lnk); } out: up_write(&lgr->llc_conf_mutex); kfree(qentry); } static void smc_llc_delete_link_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, llc_del_link_work); if (list_empty(&lgr->list)) { /* link group is terminating */ smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); goto out; } if (lgr->role == SMC_CLNT) smc_llc_process_cli_delete_link(lgr); else smc_llc_process_srv_delete_link(lgr); out: smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl); } /* process a confirm_rkey request from peer, remote flow */ static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr) { struct smc_llc_msg_confirm_rkey *llc; struct smc_llc_qentry *qentry; struct smc_link *link; int num_entries; int rk_idx; int i; qentry = lgr->llc_flow_rmt.qentry; llc = &qentry->msg.confirm_rkey; link = qentry->link; num_entries = llc->rtoken[0].num_rkeys; if (num_entries > SMC_LLC_RKEYS_PER_MSG) goto out_err; /* first rkey entry is for receiving link */ rk_idx = smc_rtoken_add(link, llc->rtoken[0].rmb_vaddr, llc->rtoken[0].rmb_key); if (rk_idx < 0) goto out_err; for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++) smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id, llc->rtoken[i].rmb_vaddr, llc->rtoken[i].rmb_key); /* max links is 3 so there is no need to support conf_rkey_cont msgs */ goto out; out_err: llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY; out: llc->hd.flags |= SMC_LLC_FLAG_RESP; smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc)); smc_llc_send_message(link, &qentry->msg); smc_llc_flow_qentry_del(&lgr->llc_flow_rmt); } /* process a delete_rkey request from peer, remote flow */ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr) { struct smc_llc_msg_delete_rkey *llc; struct smc_llc_qentry *qentry; struct smc_link *link; u8 err_mask = 0; int i, max; qentry = lgr->llc_flow_rmt.qentry; llc = &qentry->msg.delete_rkey; link = qentry->link; if (lgr->smc_version == SMC_V2) { struct smc_llc_msg_delete_rkey_v2 *llcv2; memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc)); llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2; 
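/* SMC-Rv2 delete_rkey requests may carry up to SMC_LLC_RKEYS_PER_MSG_V2 rkeys, more than fit in a 44-byte V1 message */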
llcv2->num_inval_rkeys = 0; max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); for (i = 0; i < max; i++) { if (smc_rtoken_delete(link, llcv2->rkey[i])) llcv2->num_inval_rkeys++; } memset(&llc->rkey[0], 0, sizeof(llc->rkey)); memset(&llc->reserved2, 0, sizeof(llc->reserved2)); smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc)); if (llcv2->num_inval_rkeys) { llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; llc->err_mask = llcv2->num_inval_rkeys; } goto finish; } max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX); for (i = 0; i < max; i++) { if (smc_rtoken_delete(link, llc->rkey[i])) err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i); } if (err_mask) { llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; llc->err_mask = err_mask; } finish: llc->hd.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, &qentry->msg); smc_llc_flow_qentry_del(&lgr->llc_flow_rmt); } static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type) { pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: " "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie, type); smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL); smc_lgr_terminate_sched(lgr); } /* flush the llc event queue */ static void smc_llc_event_flush(struct smc_link_group *lgr) { struct smc_llc_qentry *qentry, *q; spin_lock_bh(&lgr->llc_event_q_lock); list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) { list_del_init(&qentry->list); kfree(qentry); } spin_unlock_bh(&lgr->llc_event_q_lock); } static void smc_llc_event_handler(struct smc_llc_qentry *qentry) { union smc_llc_msg *llc = &qentry->msg; struct smc_link *link = qentry->link; struct smc_link_group *lgr = link->lgr; if (!smc_link_usable(link)) goto out; switch (llc->raw.hdr.common.llc_type) { case SMC_LLC_TEST_LINK: llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP; smc_llc_send_message(link, llc); break; case SMC_LLC_ADD_LINK: if (list_empty(&lgr->list)) goto out; /* lgr is terminating */ if (lgr->role == SMC_CLNT) { if (smc_llc_is_local_add_link(llc)) { if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) break; /* add_link in progress */ if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { schedule_work(&lgr->llc_add_link_work); } return; } if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && !lgr->llc_flow_lcl.qentry) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); wake_up(&lgr->llc_msg_waiter); return; } if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK) { /* server started add_link processing */ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK; smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); schedule_work(&lgr->llc_add_link_work); return; } if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { schedule_work(&lgr->llc_add_link_work); } } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { /* as smc server, handle client suggestion */ schedule_work(&lgr->llc_add_link_work); } return; case SMC_LLC_CONFIRM_LINK: case SMC_LLC_ADD_LINK_CONT: if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); wake_up(&lgr->llc_msg_waiter); return; } break; case SMC_LLC_DELETE_LINK: if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && !lgr->llc_flow_lcl.qentry) { /* DEL LINK REQ during ADD LINK SEQ */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); wake_up(&lgr->llc_msg_waiter); } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { schedule_work(&lgr->llc_del_link_work); } return; case 
SMC_LLC_CONFIRM_RKEY: /* new request from remote, assign to remote flow */ if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) { /* process here, does not wait for more llc msgs */ smc_llc_rmt_conf_rkey(lgr); smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt); } return; case SMC_LLC_CONFIRM_RKEY_CONT: /* not used because max links is 3, and 3 rkeys fit into * one CONFIRM_RKEY message */ break; case SMC_LLC_DELETE_RKEY: /* new request from remote, assign to remote flow */ if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) { /* process here, does not wait for more llc msgs */ smc_llc_rmt_delete_rkey(lgr); smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt); } return; case SMC_LLC_REQ_ADD_LINK: /* handle response here, smc_llc_flow_stop() cannot be called * in tasklet context */ if (lgr->role == SMC_CLNT && lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK && (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) { smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl); } else if (lgr->role == SMC_SERV) { if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { /* as smc server, handle client suggestion */ lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK; schedule_work(&lgr->llc_add_link_work); } return; } break; default: smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type); break; } out: kfree(qentry); } /* worker to process llc messages on the event queue */ static void smc_llc_event_work(struct work_struct *work) { struct smc_link_group *lgr = container_of(work, struct smc_link_group, llc_event_work); struct smc_llc_qentry *qentry; if (!lgr->llc_flow_lcl.type && lgr->delayed_event) { qentry = lgr->delayed_event; lgr->delayed_event = NULL; if (smc_link_usable(qentry->link)) smc_llc_event_handler(qentry); else kfree(qentry); } again: spin_lock_bh(&lgr->llc_event_q_lock); if (!list_empty(&lgr->llc_event_q)) { qentry = list_first_entry(&lgr->llc_event_q, struct smc_llc_qentry, list); list_del_init(&qentry->list); spin_unlock_bh(&lgr->llc_event_q_lock); smc_llc_event_handler(qentry); goto again; } spin_unlock_bh(&lgr->llc_event_q_lock); } /* process llc responses in tasklet context */ static void smc_llc_rx_response(struct smc_link *link, struct smc_llc_qentry *qentry) { enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type; struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl; u8 llc_type = qentry->msg.raw.hdr.common.llc_type; switch (llc_type) { case SMC_LLC_TEST_LINK: if (smc_link_active(link)) complete(&link->llc_testlink_resp); break; case SMC_LLC_ADD_LINK: case SMC_LLC_ADD_LINK_CONT: case SMC_LLC_CONFIRM_LINK: if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry) break; /* drop out-of-flow response */ goto assign; case SMC_LLC_DELETE_LINK: if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry) break; /* drop out-of-flow response */ goto assign; case SMC_LLC_CONFIRM_RKEY: case SMC_LLC_DELETE_RKEY: if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry) break; /* drop out-of-flow response */ goto assign; case SMC_LLC_CONFIRM_RKEY_CONT: /* not used because max links is 3 */ break; default: smc_llc_protocol_violation(link->lgr, qentry->msg.raw.hdr.common.type); break; } kfree(qentry); return; assign: /* assign responses to the local flow, we requested them */ smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); wake_up(&link->lgr->llc_msg_waiter); } static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) { struct smc_link_group *lgr = link->lgr; struct smc_llc_qentry *qentry; unsigned long flags; qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); if (!qentry) return; qentry->link = link; 
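/* GFP_ATOMIC above: smc_llc_enqueue() may run from the IB receive completion path via smc_llc_rx_handler() */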
INIT_LIST_HEAD(&qentry->list); memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg)); /* process responses immediately */ if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) && llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) { smc_llc_rx_response(link, qentry); return; } /* add requests to event queue */ spin_lock_irqsave(&lgr->llc_event_q_lock, flags); list_add_tail(&qentry->list, &lgr->llc_event_q); spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags); queue_work(system_highpri_wq, &lgr->llc_event_work); } /* copy received msg and add it to the event queue */ static void smc_llc_rx_handler(struct ib_wc *wc, void *buf) { struct smc_link *link = (struct smc_link *)wc->qp->qp_context; union smc_llc_msg *llc = buf; if (wc->byte_len < sizeof(*llc)) return; /* short message */ if (!llc->raw.hdr.common.llc_version) { if (llc->raw.hdr.length != sizeof(*llc)) return; /* invalid message */ } else { if (llc->raw.hdr.length_v2 < sizeof(*llc)) return; /* invalid message */ } smc_llc_enqueue(link, llc); } /***************************** worker, utils *********************************/ static void smc_llc_testlink_work(struct work_struct *work) { struct smc_link *link = container_of(to_delayed_work(work), struct smc_link, llc_testlink_wrk); unsigned long next_interval; unsigned long expire_time; u8 user_data[16] = { 0 }; int rc; if (!smc_link_active(link)) return; /* don't reschedule worker */ expire_time = link->wr_rx_tstamp + link->llc_testlink_time; if (time_is_after_jiffies(expire_time)) { next_interval = expire_time - jiffies; goto out; } reinit_completion(&link->llc_testlink_resp); smc_llc_send_test_link(link, user_data); /* receive TEST LINK response over RoCE fabric */ rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp, SMC_LLC_WAIT_TIME); if (!smc_link_active(link)) return; /* link state changed */ if (rc <= 0) { smcr_link_down_cond_sched(link); return; } next_interval = link->llc_testlink_time; out: schedule_delayed_work(&link->llc_testlink_wrk, next_interval); } void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) { struct net *net = sock_net(smc->clcsock->sk); INIT_WORK(&lgr->llc_event_work, smc_llc_event_work); INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work); INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work); INIT_LIST_HEAD(&lgr->llc_event_q); spin_lock_init(&lgr->llc_event_q_lock); spin_lock_init(&lgr->llc_flow_lock); init_waitqueue_head(&lgr->llc_flow_waiter); init_waitqueue_head(&lgr->llc_msg_waiter); init_rwsem(&lgr->llc_conf_mutex); lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time); } /* called after lgr was removed from lgr_list */ void smc_llc_lgr_clear(struct smc_link_group *lgr) { smc_llc_event_flush(lgr); wake_up_all(&lgr->llc_flow_waiter); wake_up_all(&lgr->llc_msg_waiter); cancel_work_sync(&lgr->llc_event_work); cancel_work_sync(&lgr->llc_add_link_work); cancel_work_sync(&lgr->llc_del_link_work); if (lgr->delayed_event) { kfree(lgr->delayed_event); lgr->delayed_event = NULL; } } int smc_llc_link_init(struct smc_link *link) { init_completion(&link->llc_testlink_resp); INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work); return 0; } void smc_llc_link_active(struct smc_link *link) { pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, " "peerid %*phN, ibdev %s, ibport %d\n", SMC_LGR_ID_SIZE, &link->lgr->id, link->lgr->net->net_cookie, SMC_LGR_ID_SIZE, &link->link_uid, SMC_LGR_ID_SIZE, &link->peer_link_uid, link->smcibdev->ibdev->name, link->ibport); link->state = 
SMC_LNK_ACTIVE; if (link->lgr->llc_testlink_time) { link->llc_testlink_time = link->lgr->llc_testlink_time; schedule_delayed_work(&link->llc_testlink_wrk, link->llc_testlink_time); } } /* called in worker context */ void smc_llc_link_clear(struct smc_link *link, bool log) { if (log) pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN" ", peerid %*phN, ibdev %s, ibport %d\n", SMC_LGR_ID_SIZE, &link->lgr->id, link->lgr->net->net_cookie, SMC_LGR_ID_SIZE, &link->link_uid, SMC_LGR_ID_SIZE, &link->peer_link_uid, link->smcibdev->ibdev->name, link->ibport); complete(&link->llc_testlink_resp); cancel_delayed_work_sync(&link->llc_testlink_wrk); } /* register a new rtoken at the remote peer (for all links) */ int smc_llc_do_confirm_rkey(struct smc_link *send_link, struct smc_buf_desc *rmb_desc) { struct smc_link_group *lgr = send_link->lgr; struct smc_llc_qentry *qentry = NULL; int rc = 0; rc = smc_llc_send_confirm_rkey(send_link, rmb_desc); if (rc) goto out; /* receive CONFIRM RKEY response from server over RoCE fabric */ qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME, SMC_LLC_CONFIRM_RKEY); if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG)) rc = -EFAULT; out: if (qentry) smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return rc; } /* unregister an rtoken at the remote peer */ int smc_llc_do_delete_rkey(struct smc_link_group *lgr, struct smc_buf_desc *rmb_desc) { struct smc_llc_qentry *qentry = NULL; struct smc_link *send_link; int rc = 0; send_link = smc_llc_usable_link(lgr); if (!send_link) return -ENOLINK; /* protected by llc_flow control */ rc = smc_llc_send_delete_rkey(send_link, rmb_desc); if (rc) goto out; /* receive DELETE RKEY response from server over RoCE fabric */ qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME, SMC_LLC_DELETE_RKEY); if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG)) rc = -EFAULT; out: if (qentry) smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return rc; } void smc_llc_link_set_uid(struct smc_link *link) { __be32 link_uid; link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id); memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE); } /* save peers link user id, used for debug purposes */ void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry) { memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid, SMC_LGR_ID_SIZE); } /* evaluate confirm link request or response */ int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry, enum smc_llc_reqresp type) { if (type == SMC_LLC_REQ) { /* SMC server assigns link_id */ qentry->link->link_id = qentry->msg.confirm_link.link_num; smc_llc_link_set_uid(qentry->link); } if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC)) return -ENOTSUPP; return 0; } /***************************** init, exit, misc ******************************/ static struct smc_wr_rx_handler smc_llc_rx_handlers[] = { { .handler = smc_llc_rx_handler, .type = SMC_LLC_CONFIRM_LINK }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_TEST_LINK }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_ADD_LINK }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_ADD_LINK_CONT }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_LINK }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_CONFIRM_RKEY }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_CONFIRM_RKEY_CONT }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_RKEY }, /* V2 types */ { .handler = smc_llc_rx_handler, .type = SMC_LLC_CONFIRM_LINK_V2 }, { .handler = smc_llc_rx_handler, .type = SMC_LLC_TEST_LINK_V2 }, { 
	  .handler = smc_llc_rx_handler, .type = SMC_LLC_ADD_LINK_V2 },
	{ .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_LINK_V2 },
	{ .handler = smc_llc_rx_handler, .type = SMC_LLC_REQ_ADD_LINK_V2 },
	{ .handler = smc_llc_rx_handler, .type = SMC_LLC_CONFIRM_RKEY_V2 },
	{ .handler = smc_llc_rx_handler, .type = SMC_LLC_DELETE_RKEY_V2 },
	{ .handler = NULL, }
};

int __init smc_llc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}
linux-master
net/smc/smc_llc.c
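The LLC receive path above separates inline handling from deferred work: smc_llc_enqueue() deals with most responses directly in the completion context, while requests are copied into queue entries and drained later by the llc_event_work worker. The following is a minimal userspace sketch of that dispatch pattern, not the kernel code itself; the names (llc_msg, llc_qentry, llc_rx) are illustrative and a plain singly linked list stands in for the spinlock-protected event queue.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LLC_FLAG_RESP	0x80

struct llc_msg {
	unsigned char type;
	unsigned char flags;
	char payload[16];
};

struct llc_qentry {
	struct llc_msg msg;
	struct llc_qentry *next;
};

/* stand-in for the spinlock-protected lgr->llc_event_q */
static struct llc_qentry *event_q;

static void handle_response(const struct llc_msg *msg)
{
	printf("response type %u handled in the receive path\n", msg->type);
}

static void enqueue_request(const struct llc_msg *msg)
{
	struct llc_qentry *q = calloc(1, sizeof(*q));

	if (!q)
		return;
	/* copy the message; the receive buffer is reused afterwards */
	memcpy(&q->msg, msg, sizeof(*msg));
	q->next = event_q;
	event_q = q;
}

/* receive path: responses are handled inline, requests are deferred */
static void llc_rx(const struct llc_msg *msg)
{
	if (msg->flags & LLC_FLAG_RESP)
		handle_response(msg);
	else
		enqueue_request(msg);
}

/* "worker": drains the queue outside the receive path */
static void llc_event_work(void)
{
	struct llc_qentry *q;

	while ((q = event_q)) {
		event_q = q->next;
		printf("request type %u processed by worker\n", q->msg.type);
		free(q);
	}
}

int main(void)
{
	struct llc_msg rsp = { .type = 2, .flags = LLC_FLAG_RESP };
	struct llc_msg req = { .type = 2, .flags = 0 };

	llc_rx(&rsp);		/* handled immediately */
	llc_rx(&req);		/* queued */
	llc_event_work();	/* drained later */
	return 0;
}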
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * IB infrastructure: * Establish SMC-R as an Infiniband Client to be notified about added and * removed IB devices of type RDMA. * Determine device and port characteristics for these IB devices. * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/scatterlist.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/inetdevice.h> #include <rdma/ib_verbs.h> #include <rdma/ib_cache.h> #include "smc_pnet.h" #include "smc_ib.h" #include "smc_core.h" #include "smc_wr.h" #include "smc.h" #include "smc_netlink.h" #define SMC_MAX_CQE 32766 /* max. # of completion queue elements */ #define SMC_QP_MIN_RNR_TIMER 5 #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */ #define SMC_QP_RETRY_CNT 7 /* 7: infinite */ #define SMC_QP_RNR_RETRY 7 /* 7: infinite */ struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex), .list = LIST_HEAD_INIT(smc_ib_devices.list), }; u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */ static int smc_ib_modify_qp_init(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = lnk->ibport; qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS | IB_QP_PORT); } static int smc_ib_modify_qp_rtr(struct smc_link *lnk) { enum ib_qp_attr_mask qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; struct ib_qp_attr qp_attr; u8 hop_lim = 1; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_RTR; qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu); qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport); if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) hop_lim = IPV6_DEFAULT_HOPLIMIT; rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0); rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid); if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway) memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac, sizeof(lnk->lgr->nexthop_mac)); else memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, sizeof(lnk->peer_mac)); qp_attr.dest_qp_num = lnk->peer_qpn; qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */ qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming * requests */ qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER; return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask); } int smc_ib_modify_qp_rts(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_RTS; qp_attr.timeout = SMC_QP_TIMEOUT; /* local ack timeout */ qp_attr.retry_cnt = SMC_QP_RETRY_CNT; /* retry count */ qp_attr.rnr_retry = SMC_QP_RNR_RETRY; /* RNR retries, 7=infinite */ qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */ qp_attr.max_rd_atomic = 1; /* # of outstanding RDMA reads and * atomic ops allowed */ return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_SQ_PSN | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC); } int 
smc_ib_modify_qp_error(struct smc_link *lnk) { struct ib_qp_attr qp_attr; memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.qp_state = IB_QPS_ERR; return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE); } int smc_ib_ready_link(struct smc_link *lnk) { struct smc_link_group *lgr = smc_get_lgr(lnk); int rc = 0; rc = smc_ib_modify_qp_init(lnk); if (rc) goto out; rc = smc_ib_modify_qp_rtr(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv, IB_CQ_SOLICITED_MASK); if (rc) goto out; rc = smc_wr_rx_post_init(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); if (lgr->role == SMC_SERV) { rc = smc_ib_modify_qp_rts(lnk); if (rc) goto out; smc_wr_remember_qp_attr(lnk); } out: return rc; } static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport) { const struct ib_gid_attr *attr; int rc; attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0); if (IS_ERR(attr)) return -ENODEV; rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]); rdma_put_gid_attr(attr); return rc; } /* Create an identifier unique for this instance of SMC-R. * The MAC-address of the first active registered IB device * plus a random 2-byte number is used to create this identifier. * This name is delivered to the peer during connection initialization. */ static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev, u8 ibport) { memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1], sizeof(smcibdev->mac[ibport - 1])); } bool smc_ib_is_valid_local_systemid(void) { return !is_zero_ether_addr(&local_systemid[2]); } static void smc_ib_init_local_systemid(void) { get_random_bytes(&local_systemid[0], 2); } bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport) { return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE; } int smc_ib_find_route(__be32 saddr, __be32 daddr, u8 nexthop_mac[], u8 *uses_gateway) { struct neighbour *neigh = NULL; struct rtable *rt = NULL; struct flowi4 fl4 = { .saddr = saddr, .daddr = daddr }; if (daddr == cpu_to_be32(INADDR_NONE)) goto out; rt = ip_route_output_flow(&init_net, &fl4, NULL); if (IS_ERR(rt)) goto out; if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET) goto out; neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr); if (neigh) { memcpy(nexthop_mac, neigh->ha, ETH_ALEN); *uses_gateway = rt->rt_uses_gateway; return 0; } out: return -ENOENT; } static int smc_ib_determine_gid_rcu(const struct net_device *ndev, const struct ib_gid_attr *attr, u8 gid[], u8 *sgid_index, struct smc_init_info_smcrv2 *smcrv2) { if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) { if (gid) memcpy(gid, &attr->gid, SMC_GID_SIZE); if (sgid_index) *sgid_index = attr->index; return 0; } if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) { struct in_device *in_dev = __in_dev_get_rcu(ndev); const struct in_ifaddr *ifa; bool subnet_match = false; if (!in_dev) goto out; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (!inet_ifa_match(smcrv2->saddr, ifa)) continue; subnet_match = true; break; } if (!subnet_match) goto out; if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr, smcrv2->daddr, smcrv2->nexthop_mac, &smcrv2->uses_gateway)) goto out; if (gid) memcpy(gid, &attr->gid, SMC_GID_SIZE); if (sgid_index) *sgid_index = attr->index; return 0; } out: return -ENODEV; } /* determine the gid for an ib-device port and vlan id */ int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport, unsigned short vlan_id, u8 gid[], u8 *sgid_index, struct 
smc_init_info_smcrv2 *smcrv2) { const struct ib_gid_attr *attr; const struct net_device *ndev; int i; for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); if (IS_ERR(attr)) continue; rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(attr); if (!IS_ERR(ndev) && ((!vlan_id && !is_vlan_dev(ndev)) || (vlan_id && is_vlan_dev(ndev) && vlan_dev_vlan_id(ndev) == vlan_id))) { if (!smc_ib_determine_gid_rcu(ndev, attr, gid, sgid_index, smcrv2)) { rcu_read_unlock(); rdma_put_gid_attr(attr); return 0; } } rcu_read_unlock(); rdma_put_gid_attr(attr); } return -ENODEV; } /* check if gid is still defined on smcibdev */ static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2, struct smc_ib_device *smcibdev, u8 ibport) { const struct ib_gid_attr *attr; bool rc = false; int i; for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) { attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i); if (IS_ERR(attr)) continue; rcu_read_lock(); if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) || (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && !(ipv6_addr_type((const struct in6_addr *)&attr->gid) & IPV6_ADDR_LINKLOCAL))) if (!memcmp(gid, &attr->gid, SMC_GID_SIZE)) rc = true; rcu_read_unlock(); rdma_put_gid_attr(attr); } return rc; } /* check all links if the gid is still defined on smcibdev */ static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport) { struct smc_link_group *lgr; int i; spin_lock_bh(&smc_lgr_list.lock); list_for_each_entry(lgr, &smc_lgr_list.list, list) { if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN)) continue; /* lgr is not affected */ if (list_empty(&lgr->list)) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state == SMC_LNK_UNUSED || lgr->lnk[i].smcibdev != smcibdev) continue; if (!smc_ib_check_link_gid(lgr->lnk[i].gid, lgr->smc_version == SMC_V2, smcibdev, ibport)) smcr_port_err(smcibdev, ibport); } } spin_unlock_bh(&smc_lgr_list.lock); } static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) { int rc; memset(&smcibdev->pattr[ibport - 1], 0, sizeof(smcibdev->pattr[ibport - 1])); rc = ib_query_port(smcibdev->ibdev, ibport, &smcibdev->pattr[ibport - 1]); if (rc) goto out; /* the SMC protocol requires specification of the RoCE MAC address */ rc = smc_ib_fill_mac(smcibdev, ibport); if (rc) goto out; if (!smc_ib_is_valid_local_systemid() && smc_ib_port_active(smcibdev, ibport)) /* create unique system identifier */ smc_ib_define_local_systemid(smcibdev, ibport); out: return rc; } /* process context wrapper for might_sleep smc_ib_remember_port_attr */ static void smc_ib_port_event_work(struct work_struct *work) { struct smc_ib_device *smcibdev = container_of( work, struct smc_ib_device, port_event_work); u8 port_idx; for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) { smc_ib_remember_port_attr(smcibdev, port_idx + 1); clear_bit(port_idx, &smcibdev->port_event_mask); if (!smc_ib_port_active(smcibdev, port_idx + 1)) { set_bit(port_idx, smcibdev->ports_going_away); smcr_port_err(smcibdev, port_idx + 1); } else { clear_bit(port_idx, smcibdev->ports_going_away); smcr_port_add(smcibdev, port_idx + 1); smc_ib_gid_check(smcibdev, port_idx + 1); } } } /* can be called in IRQ context */ static void smc_ib_global_event_handler(struct ib_event_handler *handler, struct ib_event *ibevent) { struct smc_ib_device *smcibdev; bool schedule = false; u8 port_idx; smcibdev = container_of(handler, struct 
smc_ib_device, event_handler); switch (ibevent->event) { case IB_EVENT_DEVICE_FATAL: /* terminate all ports on device */ for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) { set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule = true; } if (schedule) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_PORT_ACTIVE: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (test_and_clear_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_PORT_ERR: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; case IB_EVENT_GID_CHANGE: port_idx = ibevent->element.port_num - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); schedule_work(&smcibdev->port_event_work); break; default: break; } } void smc_ib_dealloc_protection_domain(struct smc_link *lnk) { if (lnk->roce_pd) ib_dealloc_pd(lnk->roce_pd); lnk->roce_pd = NULL; } int smc_ib_create_protection_domain(struct smc_link *lnk) { int rc; lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); rc = PTR_ERR_OR_ZERO(lnk->roce_pd); if (IS_ERR(lnk->roce_pd)) lnk->roce_pd = NULL; return rc; } static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr, struct smc_ib_device *smcibdev) { struct smc_link_group *lgr; bool rc = false; int i; spin_lock_bh(&smc_lgr->lock); list_for_each_entry(lgr, &smc_lgr->list, list) { if (lgr->is_smcd) continue; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].state == SMC_LNK_UNUSED || lgr->lnk[i].smcibdev != smcibdev) continue; if (lgr->type == SMC_LGR_SINGLE || lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) { rc = true; goto out; } } } out: spin_unlock_bh(&smc_lgr->lock); return rc; } static int smc_nl_handle_dev_port(struct sk_buff *skb, struct ib_device *ibdev, struct smc_ib_device *smcibdev, int port) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct nlattr *port_attrs; unsigned char port_state; int lnk_count = 0; port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port); if (!port_attrs) goto errout; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcibdev->pnetid_by_user[port])) goto errattr; memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet)) goto errattr; if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV, smcibdev->ndev_ifidx[port])) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1)) goto errattr; port_state = smc_ib_port_active(smcibdev, port + 1); if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state)) goto errattr; lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]); if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count)) goto errattr; nla_nest_end(skb, port_attrs); return 0; errattr: nla_nest_cancel(skb, port_attrs); errout: return -EMSGSIZE; } static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev, struct sk_buff *skb) { if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor)) return false; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device)) return 
false; if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id)) return false; return true; } static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev, struct sk_buff *skb, struct netlink_callback *cb) { char smc_ibname[IB_DEVICE_NAME_MAX]; struct smc_pci_dev smc_pci_dev; struct pci_dev *pci_dev; unsigned char is_crit; struct nlattr *attrs; void *nlh; int i; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_DEV_SMCR); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR); if (!attrs) goto errout; is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev); if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit)) goto errattr; if (smcibdev->ibdev->dev.parent) { memset(&smc_pci_dev, 0, sizeof(smc_pci_dev)); pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent); smc_set_pci_values(pci_dev, &smc_pci_dev); if (!smc_nl_handle_pci_values(&smc_pci_dev, skb)) goto errattr; } snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name); if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname)) goto errattr; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(smcibdev->ibdev, i)) continue; if (smc_nl_handle_dev_port(skb, smcibdev->ibdev, smcibdev, i - 1)) goto errattr; } nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errattr: nla_nest_cancel(skb, attrs); errout: genlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); struct smc_ib_device *smcibdev; int snum = cb_ctx->pos[0]; int num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcibdev, &dev_list->list, list) { if (num < snum) goto next; if (smc_nl_handle_smcr_dev(smcibdev, skb, cb)) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; } int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb); return skb->len; } static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) { struct smc_link *lnk = (struct smc_link *)priv; struct smc_ib_device *smcibdev = lnk->smcibdev; u8 port_idx; switch (ibevent->event) { case IB_EVENT_QP_FATAL: case IB_EVENT_QP_ACCESS_ERR: port_idx = ibevent->element.qp->port - 1; if (port_idx >= SMC_MAX_PORTS) break; set_bit(port_idx, &smcibdev->port_event_mask); if (!test_and_set_bit(port_idx, smcibdev->ports_going_away)) schedule_work(&smcibdev->port_event_work); break; default: break; } } void smc_ib_destroy_queue_pair(struct smc_link *lnk) { if (lnk->roce_qp) ib_destroy_qp(lnk->roce_qp); lnk->roce_qp = NULL; } /* create a queue pair within the protection domain for a link */ int smc_ib_create_queue_pair(struct smc_link *lnk) { int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1; struct ib_qp_init_attr qp_attr = { .event_handler = smc_ib_qp_event_handler, .qp_context = lnk, .send_cq = lnk->smcibdev->roce_cq_send, .recv_cq = lnk->smcibdev->roce_cq_recv, .srq = NULL, .cap = { /* include unsolicited rdma_writes as well, * there are max. 
2 RDMA_WRITE per 1 WR_SEND */ .max_send_wr = SMC_WR_BUF_CNT * 3, .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = sges_per_buf, .max_inline_data = 0, }, .sq_sig_type = IB_SIGNAL_REQ_WR, .qp_type = IB_QPT_RC, }; int rc; lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr); rc = PTR_ERR_OR_ZERO(lnk->roce_qp); if (IS_ERR(lnk->roce_qp)) lnk->roce_qp = NULL; else smc_wr_remember_qp_attr(lnk); return rc; } void smc_ib_put_memory_region(struct ib_mr *mr) { ib_dereg_mr(mr); } static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx) { unsigned int offset = 0; int sg_num; /* map the largest prefix of a dma mapped SG list */ sg_num = ib_map_mr_sg(buf_slot->mr[link_idx], buf_slot->sgt[link_idx].sgl, buf_slot->sgt[link_idx].orig_nents, &offset, PAGE_SIZE); return sg_num; } /* Allocate a memory region and map the dma mapped SG list of buf_slot */ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, struct smc_buf_desc *buf_slot, u8 link_idx) { if (buf_slot->mr[link_idx]) return 0; /* already done */ buf_slot->mr[link_idx] = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order); if (IS_ERR(buf_slot->mr[link_idx])) { int rc; rc = PTR_ERR(buf_slot->mr[link_idx]); buf_slot->mr[link_idx] = NULL; return rc; } if (smc_ib_map_mr_sg(buf_slot, link_idx) != buf_slot->sgt[link_idx].orig_nents) return -EINVAL; return 0; } bool smc_ib_is_sg_need_sync(struct smc_link *lnk, struct smc_buf_desc *buf_slot) { struct scatterlist *sg; unsigned int i; bool ret = false; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; if (dma_need_sync(lnk->smcibdev->ibdev->dma_device, sg_dma_address(sg))) { ret = true; goto out; } } out: return ret; } /* synchronize buffer usage for cpu access */ void smc_ib_sync_sg_for_cpu(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { struct scatterlist *sg; unsigned int i; if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx))) return; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev, sg_dma_address(sg), sg_dma_len(sg), data_direction); } } /* synchronize buffer usage for device access */ void smc_ib_sync_sg_for_device(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { struct scatterlist *sg; unsigned int i; if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx))) return; /* for now there is just one DMA address */ for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg, buf_slot->sgt[lnk->link_idx].nents, i) { if (!sg_dma_len(sg)) break; ib_dma_sync_single_for_device(lnk->smcibdev->ibdev, sg_dma_address(sg), sg_dma_len(sg), data_direction); } } /* Map a new TX or RX buffer SG-table to DMA */ int smc_ib_buf_map_sg(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { int mapped_nents; mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev, buf_slot->sgt[lnk->link_idx].sgl, buf_slot->sgt[lnk->link_idx].orig_nents, data_direction); if (!mapped_nents) return -ENOMEM; return mapped_nents; } void smc_ib_buf_unmap_sg(struct smc_link *lnk, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction) { if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address) return; /* already unmapped */ ib_dma_unmap_sg(lnk->smcibdev->ibdev, 
buf_slot->sgt[lnk->link_idx].sgl, buf_slot->sgt[lnk->link_idx].orig_nents, data_direction); buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0; } long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) { struct ib_cq_init_attr cqattr = { .cqe = SMC_MAX_CQE, .comp_vector = 0 }; int cqe_size_order, smc_order; long rc; mutex_lock(&smcibdev->mutex); rc = 0; if (smcibdev->initialized) goto out; /* the calculated number of cq entries fits to mlx5 cq allocation */ cqe_size_order = cache_line_size() == 128 ? 7 : 6; smc_order = MAX_ORDER - cqe_size_order; if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE) cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2; smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, smc_wr_tx_cq_handler, NULL, smcibdev, &cqattr); rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); if (IS_ERR(smcibdev->roce_cq_send)) { smcibdev->roce_cq_send = NULL; goto out; } smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, smc_wr_rx_cq_handler, NULL, smcibdev, &cqattr); rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv); if (IS_ERR(smcibdev->roce_cq_recv)) { smcibdev->roce_cq_recv = NULL; goto err; } smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; goto out; err: ib_destroy_cq(smcibdev->roce_cq_send); out: mutex_unlock(&smcibdev->mutex); return rc; } static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) { mutex_lock(&smcibdev->mutex); if (!smcibdev->initialized) goto out; smcibdev->initialized = 0; ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); smc_wr_remove_dev(smcibdev); out: mutex_unlock(&smcibdev->mutex); } static struct ib_client smc_ib_client; static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port) { struct ib_device *ibdev = smcibdev->ibdev; struct net_device *ndev; if (!ibdev->ops.get_netdev) return; ndev = ibdev->ops.get_netdev(ibdev, port + 1); if (ndev) { smcibdev->ndev_ifidx[port] = ndev->ifindex; dev_put(ndev); } } void smc_ib_ndev_change(struct net_device *ndev, unsigned long event) { struct smc_ib_device *smcibdev; struct ib_device *libdev; struct net_device *lndev; u8 port_cnt; int i; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { port_cnt = smcibdev->ibdev->phys_port_cnt; for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) { libdev = smcibdev->ibdev; if (!libdev->ops.get_netdev) continue; lndev = libdev->ops.get_netdev(libdev, i + 1); dev_put(lndev); if (lndev != ndev) continue; if (event == NETDEV_REGISTER) smcibdev->ndev_ifidx[i] = ndev->ifindex; if (event == NETDEV_UNREGISTER) smcibdev->ndev_ifidx[i] = 0; } } mutex_unlock(&smc_ib_devices.mutex); } /* callback function for ib_register_client() */ static int smc_ib_add_dev(struct ib_device *ibdev) { struct smc_ib_device *smcibdev; u8 port_cnt; int i; if (ibdev->node_type != RDMA_NODE_IB_CA) return -EOPNOTSUPP; smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL); if (!smcibdev) return -ENOMEM; smcibdev->ibdev = ibdev; INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); atomic_set(&smcibdev->lnk_cnt, 0); init_waitqueue_head(&smcibdev->lnks_deleted); mutex_init(&smcibdev->mutex); mutex_lock(&smc_ib_devices.mutex); list_add_tail(&smcibdev->list, &smc_ib_devices.list); mutex_unlock(&smc_ib_devices.mutex); ib_set_client_data(ibdev, &smc_ib_client, smcibdev); INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, smc_ib_global_event_handler); ib_register_event_handler(&smcibdev->event_handler); /* trigger reading of the port attributes */ port_cnt = 
smcibdev->ibdev->phys_port_cnt; pr_warn_ratelimited("smc: adding ib device %s with port count %d\n", smcibdev->ibdev->name, port_cnt); for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) { set_bit(i, &smcibdev->port_event_mask); /* determine pnetids of the port */ if (smc_pnetid_by_dev_port(ibdev->dev.parent, i, smcibdev->pnetid[i])) smc_pnetid_by_table_ib(smcibdev, i + 1); smc_copy_netdev_ifindex(smcibdev, i); pr_warn_ratelimited("smc: ib device %s port %d has pnetid " "%.16s%s\n", smcibdev->ibdev->name, i + 1, smcibdev->pnetid[i], smcibdev->pnetid_by_user[i] ? " (user defined)" : ""); } schedule_work(&smcibdev->port_event_work); return 0; } /* callback function for ib_unregister_client() */ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) { struct smc_ib_device *smcibdev = client_data; mutex_lock(&smc_ib_devices.mutex); list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ mutex_unlock(&smc_ib_devices.mutex); pr_warn_ratelimited("smc: removing ib device %s\n", smcibdev->ibdev->name); smc_smcr_terminate_all(smcibdev); smc_ib_cleanup_per_ibdev(smcibdev); ib_unregister_event_handler(&smcibdev->event_handler); cancel_work_sync(&smcibdev->port_event_work); kfree(smcibdev); } static struct ib_client smc_ib_client = { .name = "smc_ib", .add = smc_ib_add_dev, .remove = smc_ib_remove_dev, }; int __init smc_ib_register_client(void) { smc_ib_init_local_systemid(); return ib_register_client(&smc_ib_client); } void smc_ib_unregister_client(void) { ib_unregister_client(&smc_ib_client); }
linux-master
net/smc/smc_ib.c
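smc_ib_init_local_systemid() and smc_ib_define_local_systemid() above assemble the 8-byte SMC system identifier from two random bytes followed by the 6-byte MAC address of the first active RoCE port. A small standalone sketch of that layout follows; it is only an illustration, rand() stands in for the kernel's get_random_bytes(), and the MAC value is made up.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SYSTEMID_LEN	8
#define ETH_ALEN	6

static unsigned char local_systemid[SYSTEMID_LEN];

/* two leading random bytes, as in smc_ib_init_local_systemid() */
static void init_local_systemid(void)
{
	local_systemid[0] = rand() & 0xff;
	local_systemid[1] = rand() & 0xff;
}

/* MAC of the first active port fills bytes 2..7 */
static void define_local_systemid(const unsigned char mac[ETH_ALEN])
{
	memcpy(&local_systemid[2], mac, ETH_ALEN);
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x02 };
	int i;

	init_local_systemid();
	define_local_systemid(mac);
	for (i = 0; i < SYSTEMID_LEN; i++)
		printf("%02x", local_systemid[i]);
	printf("\n");
	return 0;
}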
// SPDX-License-Identifier: GPL-2.0-only /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Monitoring SMC transport protocol sockets * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sock_diag.h> #include <linux/inet_diag.h> #include <linux/smc_diag.h> #include <net/netlink.h> #include <net/smc.h> #include "smc.h" #include "smc_core.h" struct smc_diag_dump_ctx { int pos[2]; }; static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb) { return (struct smc_diag_dump_ctx *)cb->ctx; } static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk) { struct smc_sock *smc = smc_sk(sk); memset(r, 0, sizeof(*r)); r->diag_family = sk->sk_family; sock_diag_save_cookie(sk, r->id.idiag_cookie); if (!smc->clcsock) return; r->id.idiag_sport = htons(smc->clcsock->sk->sk_num); r->id.idiag_dport = smc->clcsock->sk->sk_dport; r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if; if (sk->sk_protocol == SMCPROTO_SMC) { r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr; r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_protocol == SMCPROTO_SMC6) { memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr, sizeof(smc->clcsock->sk->sk_v6_rcv_saddr)); memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr, sizeof(smc->clcsock->sk->sk_v6_daddr)); #endif } } static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct smc_diag_msg *r, struct user_namespace *user_ns) { if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown)) return 1; r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); r->diag_inode = sock_i_ino(sk); return 0; } static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, const struct smc_diag_req *req, struct nlattr *bc) { struct smc_sock *smc = smc_sk(sk); struct smc_diag_fallback fallback; struct user_namespace *user_ns; struct smc_diag_msg *r; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI); if (!nlh) return -EMSGSIZE; r = nlmsg_data(nlh); smc_diag_msg_common_fill(r, sk); r->diag_state = sk->sk_state; if (smc->use_fallback) r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP; else if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd) r->diag_mode = SMC_DIAG_MODE_SMCD; else r->diag_mode = SMC_DIAG_MODE_SMCR; user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk); if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns)) goto errout; fallback.reason = smc->fallback_rsn; fallback.peer_diagnosis = smc->peer_diagnosis; if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0) goto errout; if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.alert_token_local) { struct smc_connection *conn = &smc->conn; struct smc_diag_conninfo cinfo = { .token = conn->alert_token_local, .sndbuf_size = conn->sndbuf_desc ? conn->sndbuf_desc->len : 0, .rmbe_size = conn->rmb_desc ? 
conn->rmb_desc->len : 0, .peer_rmbe_size = conn->peer_rmbe_size, .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap, .rx_prod.count = conn->local_rx_ctrl.prod.count, .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap, .rx_cons.count = conn->local_rx_ctrl.cons.count, .tx_prod.wrap = conn->local_tx_ctrl.prod.wrap, .tx_prod.count = conn->local_tx_ctrl.prod.count, .tx_cons.wrap = conn->local_tx_ctrl.cons.wrap, .tx_cons.count = conn->local_tx_ctrl.cons.count, .tx_prod_flags = *(u8 *)&conn->local_tx_ctrl.prod_flags, .tx_conn_state_flags = *(u8 *)&conn->local_tx_ctrl.conn_state_flags, .rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags, .rx_conn_state_flags = *(u8 *)&conn->local_rx_ctrl.conn_state_flags, .tx_prep.wrap = conn->tx_curs_prep.wrap, .tx_prep.count = conn->tx_curs_prep.count, .tx_sent.wrap = conn->tx_curs_sent.wrap, .tx_sent.count = conn->tx_curs_sent.count, .tx_fin.wrap = conn->tx_curs_fin.wrap, .tx_fin.count = conn->tx_curs_fin.count, }; if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0) goto errout; } if (smc_conn_lgr_valid(&smc->conn) && !smc->conn.lgr->is_smcd && (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_link *link = smc->conn.lnk; struct smc_diag_lgrinfo linfo = { .role = smc->conn.lgr->role, .lnk[0].ibport = link->ibport, .lnk[0].link_id = link->link_id, }; memcpy(linfo.lnk[0].ibname, smc->conn.lgr->lnk[0].smcibdev->ibdev->name, sizeof(link->smcibdev->ibdev->name)); smc_gid_be16_convert(linfo.lnk[0].gid, link->gid); smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid); if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0) goto errout; } if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd && (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; struct smcd_diag_dmbinfo dinfo; struct smcd_dev *smcd = conn->lgr->smcd; memset(&dinfo, 0, sizeof(dinfo)); dinfo.linkid = *((u32 *)conn->lgr->id); dinfo.peer_gid = conn->lgr->peer_gid; dinfo.my_gid = smcd->ops->get_local_gid(smcd); dinfo.token = conn->rmb_desc->token; dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; } nlmsg_end(skb, nlh); return 0; errout: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb, struct netlink_callback *cb, int p_type) { struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb); struct net *net = sock_net(skb->sk); int snum = cb_ctx->pos[p_type]; struct nlattr *bc = NULL; struct hlist_head *head; int rc = 0, num = 0; struct sock *sk; read_lock(&prot->h.smc_hash->lock); head = &prot->h.smc_hash->ht; if (hlist_empty(head)) goto out; sk_for_each(sk, head) { if (!net_eq(sock_net(sk), net)) continue; if (num < snum) goto next; rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc); if (rc < 0) goto out; next: num++; } out: read_unlock(&prot->h.smc_hash->lock); cb_ctx->pos[p_type] = num; return rc; } static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { int rc = 0; rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC); if (!rc) smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6); return skb->len; } static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) { struct net *net = sock_net(skb->sk); if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY && h->nlmsg_flags & NLM_F_DUMP) { { struct netlink_dump_control c = { .dump = smc_diag_dump, .min_dump_alloc = SKB_WITH_OVERHEAD(32768), }; return 
netlink_dump_start(net->diag_nlsk, skb, h, &c); } } return 0; } static const struct sock_diag_handler smc_diag_handler = { .family = AF_SMC, .dump = smc_diag_handler_dump, }; static int __init smc_diag_init(void) { return sock_diag_register(&smc_diag_handler); } static void __exit smc_diag_exit(void) { sock_diag_unregister(&smc_diag_handler); } module_init(smc_diag_init); module_exit(smc_diag_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */); MODULE_ALIAS_GENL_FAMILY(SMCR_GENL_FAMILY_NAME);
linux-master
net/smc/smc_diag.c
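smc_diag_handler_dump() above only starts a dump for SOCK_DIAG_BY_FAMILY requests that carry NLM_F_DUMP. The following is a hedged userspace sketch of issuing such a query over NETLINK_SOCK_DIAG; it assumes the uapi header <linux/smc_diag.h> (struct smc_diag_req / struct smc_diag_msg) is installed and the smc_diag module is loaded, and it prints only two fields of each reply.

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/smc_diag.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43
#endif

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct smc_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.diag_family = AF_SMC,
			.diag_ext = 1 << (SMC_DIAG_CONNINFO - 1),
		},
	};
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return 1;
	if (send(fd, &msg, sizeof(msg), 0) < 0)
		return 1;
	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct smc_diag_msg *r = NLMSG_DATA(h);

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;
			printf("AF_SMC socket: state %u, inode %llu\n",
			       r->diag_state,
			       (unsigned long long)r->diag_inode);
		}
	}
out:
	close(fd);
	return 0;
}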
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Manage RMBE * copy new RMBE data into user space * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/net.h> #include <linux/rcupdate.h> #include <linux/sched/signal.h> #include <linux/splice.h> #include <net/sock.h> #include <trace/events/sock.h> #include "smc.h" #include "smc_core.h" #include "smc_cdc.h" #include "smc_tx.h" /* smc_tx_consumer_update() */ #include "smc_rx.h" #include "smc_stats.h" #include "smc_tracepoint.h" /* callback implementation to wakeup consumers blocked with smc_rx_wait(). * indirectly called by smc_cdc_msg_recv_action(). */ static void smc_rx_wake_up(struct sock *sk) { struct socket_wq *wq; trace_sk_data_ready(sk); /* derived from sock_def_readable() */ /* called already in smc_listen_work() */ rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | EPOLLRDNORM | EPOLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); if ((sk->sk_shutdown == SHUTDOWN_MASK) || (sk->sk_state == SMC_CLOSED)) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); rcu_read_unlock(); } /* Update consumer cursor * @conn connection to update * @cons consumer cursor * @len number of Bytes consumed * Returns: * 1 if we should end our receive, 0 otherwise */ static int smc_rx_update_consumer(struct smc_sock *smc, union smc_host_cursor cons, size_t len) { struct smc_connection *conn = &smc->conn; struct sock *sk = &smc->sk; bool force = false; int diff, rc = 0; smc_curs_add(conn->rmb_desc->len, &cons, len); /* did we process urgent data? */ if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) { diff = smc_curs_comp(conn->rmb_desc->len, &cons, &conn->urg_curs); if (sock_flag(sk, SOCK_URGINLINE)) { if (diff == 0) { force = true; rc = 1; conn->urg_state = SMC_URG_READ; } } else { if (diff == 1) { /* skip urgent byte */ force = true; smc_curs_add(conn->rmb_desc->len, &cons, 1); conn->urg_rx_skip_pend = false; } else if (diff < -1) /* we read past urgent byte */ conn->urg_state = SMC_URG_READ; } } smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn); /* send consumer cursor update if required */ /* similar to advertising new TCP rcv_wnd if required */ smc_tx_consumer_update(conn, force); return rc; } static void smc_rx_update_cons(struct smc_sock *smc, size_t len) { struct smc_connection *conn = &smc->conn; union smc_host_cursor cons; smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); smc_rx_update_consumer(smc, cons, len); } struct smc_spd_priv { struct smc_sock *smc; size_t len; }; static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private; struct smc_sock *smc = priv->smc; struct smc_connection *conn; struct sock *sk = &smc->sk; if (sk->sk_state == SMC_CLOSED || sk->sk_state == SMC_PEERFINCLOSEWAIT || sk->sk_state == SMC_APPFINCLOSEWAIT) goto out; conn = &smc->conn; lock_sock(sk); smc_rx_update_cons(smc, priv->len); release_sock(sk); if (atomic_sub_and_test(priv->len, &conn->splice_pending)) smc_rx_wake_up(sk); out: kfree(priv); put_page(buf->page); sock_put(sk); } static const struct pipe_buf_operations smc_pipe_ops = { .release = smc_rx_pipe_buf_release, .get = generic_pipe_buf_get }; static void smc_rx_spd_release(struct splice_pipe_desc *spd, unsigned int i) { put_page(spd->pages[i]); } static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t 
len, struct smc_sock *smc) { struct smc_link_group *lgr = smc->conn.lgr; int offset = offset_in_page(src); struct partial_page *partial; struct splice_pipe_desc spd; struct smc_spd_priv **priv; struct page **pages; int bytes, nr_pages; int i; nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ? PAGE_ALIGN(len + offset) / PAGE_SIZE : 1; pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); if (!pages) goto out; partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL); if (!partial) goto out_page; priv = kcalloc(nr_pages, sizeof(*priv), GFP_KERNEL); if (!priv) goto out_part; for (i = 0; i < nr_pages; i++) { priv[i] = kzalloc(sizeof(**priv), GFP_KERNEL); if (!priv[i]) goto out_priv; } if (lgr->is_smcd || (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) { /* smcd or smcr that uses physically contiguous RMBs */ priv[0]->len = len; priv[0]->smc = smc; partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr; partial[0].len = len; partial[0].private = (unsigned long)priv[0]; pages[0] = smc->conn.rmb_desc->pages; } else { int size, left = len; void *buf = src; /* smcr that uses virtually contiguous RMBs*/ for (i = 0; i < nr_pages; i++) { size = min_t(int, PAGE_SIZE - offset, left); priv[i]->len = size; priv[i]->smc = smc; pages[i] = vmalloc_to_page(buf); partial[i].offset = offset; partial[i].len = size; partial[i].private = (unsigned long)priv[i]; buf += size / sizeof(*buf); left -= size; offset = 0; } } spd.nr_pages_max = nr_pages; spd.nr_pages = nr_pages; spd.pages = pages; spd.partial = partial; spd.ops = &smc_pipe_ops; spd.spd_release = smc_rx_spd_release; bytes = splice_to_pipe(pipe, &spd); if (bytes > 0) { sock_hold(&smc->sk); if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) { for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++) get_page(pages[i]); } else { get_page(smc->conn.rmb_desc->pages); } atomic_add(bytes, &smc->conn.splice_pending); } kfree(priv); kfree(partial); kfree(pages); return bytes; out_priv: for (i = (i - 1); i >= 0; i--) kfree(priv[i]); kfree(priv); out_part: kfree(partial); out_page: kfree(pages); out: return -ENOMEM; } static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn) { return atomic_read(&conn->bytes_to_rcv) && !atomic_read(&conn->splice_pending); } /* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted * @smc smc socket * @timeo pointer to max seconds to wait, pointer to value 0 for no timeout * @fcrit add'l criterion to evaluate as function pointer * Returns: * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown. * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted). 
*/ int smc_rx_wait(struct smc_sock *smc, long *timeo, int (*fcrit)(struct smc_connection *conn)) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct smc_connection *conn = &smc->conn; struct smc_cdc_conn_state_flags *cflags = &conn->local_tx_ctrl.conn_state_flags; struct sock *sk = &smc->sk; int rc; if (fcrit(conn)) return 1; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); add_wait_queue(sk_sleep(sk), &wait); rc = sk_wait_event(sk, timeo, READ_ONCE(sk->sk_err) || cflags->peer_conn_abort || READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN || conn->killed || fcrit(conn), &wait); remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return rc; } static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len, int flags) { struct smc_connection *conn = &smc->conn; union smc_host_cursor cons; struct sock *sk = &smc->sk; int rc = 0; if (sock_flag(sk, SOCK_URGINLINE) || !(conn->urg_state == SMC_URG_VALID) || conn->urg_state == SMC_URG_READ) return -EINVAL; SMC_STAT_INC(smc, urg_data_cnt); if (conn->urg_state == SMC_URG_VALID) { if (!(flags & MSG_PEEK)) smc->conn.urg_state = SMC_URG_READ; msg->msg_flags |= MSG_OOB; if (len > 0) { if (!(flags & MSG_TRUNC)) rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1); len = 1; smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); if (smc_curs_diff(conn->rmb_desc->len, &cons, &conn->urg_curs) > 1) conn->urg_rx_skip_pend = true; /* Urgent Byte was already accounted for, but trigger * skipping the urgent byte in non-inline case */ if (!(flags & MSG_PEEK)) smc_rx_update_consumer(smc, cons, 0); } else { msg->msg_flags |= MSG_TRUNC; } return rc ? -EFAULT : len; } if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN) return 0; return -EAGAIN; } static bool smc_rx_recvmsg_data_available(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; if (smc_rx_data_available(conn)) return true; else if (conn->urg_state == SMC_URG_VALID) /* we received a single urgent Byte - skip */ smc_rx_update_cons(smc, 0); return false; } /* smc_rx_recvmsg - receive data from RMBE * @msg: copy data to receive buffer * @pipe: copy data to pipe if set - indicates splice() call * * rcvbuf consumer: main API called by socket layer. * Called under sk lock. 
*/ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, struct pipe_inode_info *pipe, size_t len, int flags) { size_t copylen, read_done = 0, read_remaining = len; size_t chunk_len, chunk_off, chunk_len_sum; struct smc_connection *conn = &smc->conn; int (*func)(struct smc_connection *conn); union smc_host_cursor cons; int readable, chunk; char *rcvbuf_base; struct sock *sk; int splbytes; long timeo; int target; /* Read at least these many bytes */ int rc; if (unlikely(flags & MSG_ERRQUEUE)) return -EINVAL; /* future work for sk.sk_family == AF_SMC */ sk = &smc->sk; if (sk->sk_state == SMC_LISTEN) return -ENOTCONN; if (flags & MSG_OOB) return smc_rx_recv_urg(smc, msg, len, flags); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); readable = atomic_read(&conn->bytes_to_rcv); if (readable >= conn->rmb_desc->len) SMC_STAT_RMB_RX_FULL(smc, !conn->lnk); if (len < readable) SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk); /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */ rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr; do { /* while (read_remaining) */ if (read_done >= target || (pipe && read_done)) break; if (conn->killed) break; if (smc_rx_recvmsg_data_available(smc)) goto copy; if (sk->sk_shutdown & RCV_SHUTDOWN) { /* smc_cdc_msg_recv_action() could have run after * above smc_rx_recvmsg_data_available() */ if (smc_rx_recvmsg_data_available(smc)) goto copy; break; } if (read_done) { if (sk->sk_err || sk->sk_state == SMC_CLOSED || !timeo || signal_pending(current)) break; } else { if (sk->sk_err) { read_done = sock_error(sk); break; } if (sk->sk_state == SMC_CLOSED) { if (!sock_flag(sk, SOCK_DONE)) { /* This occurs when user tries to read * from never connected socket. */ read_done = -ENOTCONN; break; } break; } if (!timeo) return -EAGAIN; if (signal_pending(current)) { read_done = sock_intr_errno(timeo); break; } } if (!smc_rx_data_available(conn)) { smc_rx_wait(smc, &timeo, smc_rx_data_available); continue; } copy: /* initialize variables for 1st iteration of subsequent loop */ /* could be just 1 byte, even after waiting on data above */ readable = atomic_read(&conn->bytes_to_rcv); splbytes = atomic_read(&conn->splice_pending); if (!readable || (msg && splbytes)) { if (splbytes) func = smc_rx_data_available_and_no_splice_pend; else func = smc_rx_data_available; smc_rx_wait(smc, &timeo, func); continue; } smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); /* subsequent splice() calls pick up where previous left */ if (splbytes) smc_curs_add(conn->rmb_desc->len, &cons, splbytes); if (conn->urg_state == SMC_URG_VALID && sock_flag(&smc->sk, SOCK_URGINLINE) && readable > 1) readable--; /* always stop at urgent Byte */ /* not more than what user space asked for */ copylen = min_t(size_t, read_remaining, readable); /* determine chunks where to read from rcvbuf */ /* either unwrapped case, or 1st chunk of wrapped case */ chunk_len = min_t(size_t, copylen, conn->rmb_desc->len - cons.count); chunk_len_sum = chunk_len; chunk_off = cons.count; smc_rmb_sync_sg_for_cpu(conn); for (chunk = 0; chunk < 2; chunk++) { if (!(flags & MSG_TRUNC)) { if (msg) { rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off, chunk_len); } else { rc = smc_rx_splice(pipe, rcvbuf_base + chunk_off, chunk_len, smc); } if (rc < 0) { if (!read_done) read_done = -EFAULT; goto out; } } read_remaining -= chunk_len; read_done += chunk_len; if (chunk_len_sum == copylen) break; /* either on 1st or 2nd iteration */ /* prepare next (== 2nd) iteration */ chunk_len = 
copylen - chunk_len; /* remainder */ chunk_len_sum += chunk_len; chunk_off = 0; /* modulo offset in recv ring buffer */ } /* update cursors */ if (!(flags & MSG_PEEK)) { /* increased in recv tasklet smc_cdc_msg_rcv() */ smp_mb__before_atomic(); atomic_sub(copylen, &conn->bytes_to_rcv); /* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */ smp_mb__after_atomic(); if (msg && smc_rx_update_consumer(smc, cons, copylen)) goto out; } trace_smc_rx_recvmsg(smc, copylen); } while (read_remaining); out: return read_done; } /* Initialize receive properties on connection establishment. NB: not __init! */ void smc_rx_init(struct smc_sock *smc) { smc->sk.sk_data_ready = smc_rx_wake_up; atomic_set(&smc->conn.splice_pending, 0); smc->conn.urg_state = SMC_URG_READ; }
linux-master
net/smc/smc_rx.c
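The chunk loop in smc_rx_recvmsg() above copies from the circular RMBE in at most two pieces: one chunk up to the end of the buffer and, if the readable data wraps, a second chunk from the start. Below is a self-contained userspace sketch of that wrap handling, mirroring the chunk_len / chunk_off bookkeeping; names and buffer sizes are illustrative only.

#include <stdio.h>
#include <string.h>

static size_t ring_read(const char *ring, size_t ring_len, size_t cons,
			char *dst, size_t copylen)
{
	size_t chunk_len, chunk_off = cons, done = 0;
	int chunk;

	for (chunk = 0; chunk < 2; chunk++) {
		chunk_len = copylen - done;
		if (chunk_len > ring_len - chunk_off)
			chunk_len = ring_len - chunk_off; /* 1st chunk ends at wrap point */
		memcpy(dst + done, ring + chunk_off, chunk_len);
		done += chunk_len;
		if (done == copylen)
			break;		/* either on 1st or 2nd iteration */
		chunk_off = 0;		/* 2nd chunk starts at ring begin */
	}
	return done;
}

int main(void)
{
	char ring[8] = { 'E', 'F', 'G', 'H', 'A', 'B', 'C', 'D' };
	char out[9] = { 0 };

	/* consumer cursor at offset 4, 8 readable bytes -> wraps once */
	ring_read(ring, sizeof(ring), 4, out, 8);
	printf("%s\n", out);	/* prints ABCDEFGH */
	return 0;
}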
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Socket Closing - normal and abnormal * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/workqueue.h> #include <linux/sched/signal.h> #include <net/sock.h> #include <net/tcp.h> #include "smc.h" #include "smc_tx.h" #include "smc_cdc.h" #include "smc_close.h" /* release the clcsock that is assigned to the smc_sock */ void smc_clcsock_release(struct smc_sock *smc) { struct socket *tcp; if (smc->listen_smc && current_work() != &smc->smc_listen_work) cancel_work_sync(&smc->smc_listen_work); mutex_lock(&smc->clcsock_release_lock); if (smc->clcsock) { tcp = smc->clcsock; smc->clcsock = NULL; sock_release(tcp); } mutex_unlock(&smc->clcsock_release_lock); } static void smc_close_cleanup_listen(struct sock *parent) { struct sock *sk; /* Close non-accepted connections */ while ((sk = smc_accept_dequeue(parent, NULL))) smc_close_non_accepted(sk); } /* wait for sndbuf data being transmitted */ static void smc_close_stream_wait(struct smc_sock *smc, long timeout) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = &smc->sk; if (!timeout) return; if (!smc_tx_prepared_sends(&smc->conn)) return; /* Send out corked data remaining in sndbuf */ smc_tx_pending(&smc->conn); smc->wait_close_tx_prepared = 1; add_wait_queue(sk_sleep(sk), &wait); while (!signal_pending(current) && timeout) { int rc; rc = sk_wait_event(sk, &timeout, !smc_tx_prepared_sends(&smc->conn) || READ_ONCE(sk->sk_err) == ECONNABORTED || READ_ONCE(sk->sk_err) == ECONNRESET || smc->conn.killed, &wait); if (rc) break; } remove_wait_queue(sk_sleep(sk), &wait); smc->wait_close_tx_prepared = 0; } void smc_close_wake_tx_prepared(struct smc_sock *smc) { if (smc->wait_close_tx_prepared) /* wake up socket closing */ smc->sk.sk_state_change(&smc->sk); } static int smc_close_wr(struct smc_connection *conn) { conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1; return smc_cdc_get_slot_and_msg_send(conn); } static int smc_close_final(struct smc_connection *conn) { if (atomic_read(&conn->bytes_to_rcv)) conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; else conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1; if (conn->killed) return -EPIPE; return smc_cdc_get_slot_and_msg_send(conn); } int smc_close_abort(struct smc_connection *conn) { conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; return smc_cdc_get_slot_and_msg_send(conn); } static void smc_close_cancel_work(struct smc_sock *smc) { struct sock *sk = &smc->sk; release_sock(sk); cancel_work_sync(&smc->conn.close_work); cancel_delayed_work_sync(&smc->conn.tx_work); lock_sock(sk); } /* terminate smc socket abnormally - active abort * link group is terminated, i.e. 
RDMA communication no longer possible */ void smc_close_active_abort(struct smc_sock *smc) { struct sock *sk = &smc->sk; bool release_clcsock = false; if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { sk->sk_err = ECONNABORTED; if (smc->clcsock && smc->clcsock->sk) tcp_abort(smc->clcsock->sk, ECONNABORTED); } switch (sk->sk_state) { case SMC_ACTIVE: case SMC_APPCLOSEWAIT1: case SMC_APPCLOSEWAIT2: sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); if (sk->sk_state != SMC_PEERABORTWAIT) break; sk->sk_state = SMC_CLOSED; sock_put(sk); /* (postponed) passive closing */ break; case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: case SMC_PEERFINCLOSEWAIT: sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); if (sk->sk_state != SMC_PEERABORTWAIT) break; sk->sk_state = SMC_CLOSED; smc_conn_free(&smc->conn); release_clcsock = true; sock_put(sk); /* passive closing */ break; case SMC_PROCESSABORT: case SMC_APPFINCLOSEWAIT: sk->sk_state = SMC_PEERABORTWAIT; smc_close_cancel_work(smc); if (sk->sk_state != SMC_PEERABORTWAIT) break; sk->sk_state = SMC_CLOSED; smc_conn_free(&smc->conn); release_clcsock = true; break; case SMC_INIT: case SMC_PEERABORTWAIT: case SMC_CLOSED: break; } sock_set_flag(sk, SOCK_DEAD); sk->sk_state_change(sk); if (release_clcsock) { release_sock(sk); smc_clcsock_release(smc); lock_sock(sk); } } static inline bool smc_close_sent_any_close(struct smc_connection *conn) { return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort || conn->local_tx_ctrl.conn_state_flags.peer_conn_closed; } int smc_close_active(struct smc_sock *smc) { struct smc_cdc_conn_state_flags *txflags = &smc->conn.local_tx_ctrl.conn_state_flags; struct smc_connection *conn = &smc->conn; struct sock *sk = &smc->sk; int old_state; long timeout; int rc = 0; int rc1 = 0; timeout = current->flags & PF_EXITING ? 0 : sock_flag(sk, SOCK_LINGER) ? sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; old_state = sk->sk_state; again: switch (sk->sk_state) { case SMC_INIT: sk->sk_state = SMC_CLOSED; break; case SMC_LISTEN: sk->sk_state = SMC_CLOSED; sk->sk_state_change(sk); /* wake up accept */ if (smc->clcsock && smc->clcsock->sk) { write_lock_bh(&smc->clcsock->sk->sk_callback_lock); smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, &smc->clcsk_data_ready); smc->clcsock->sk->sk_user_data = NULL; write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); } smc_close_cleanup_listen(sk); release_sock(sk); flush_work(&smc->tcp_listen_work); lock_sock(sk); break; case SMC_ACTIVE: smc_close_stream_wait(smc, timeout); release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); lock_sock(sk); if (sk->sk_state == SMC_ACTIVE) { /* send close request */ rc = smc_close_final(conn); sk->sk_state = SMC_PEERCLOSEWAIT1; /* actively shutdown clcsock before peer close it, * prevent peer from entering TIME_WAIT state. */ if (smc->clcsock && smc->clcsock->sk) { rc1 = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); rc = rc ? 
rc : rc1; } } else { /* peer event has changed the state */ goto again; } break; case SMC_APPFINCLOSEWAIT: /* socket already shutdown wr or both (active close) */ if (txflags->peer_done_writing && !smc_close_sent_any_close(conn)) { /* just shutdown wr done, send close request */ rc = smc_close_final(conn); } sk->sk_state = SMC_CLOSED; break; case SMC_APPCLOSEWAIT1: case SMC_APPCLOSEWAIT2: if (!smc_cdc_rxed_any_close(conn)) smc_close_stream_wait(smc, timeout); release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); lock_sock(sk); if (sk->sk_state != SMC_APPCLOSEWAIT1 && sk->sk_state != SMC_APPCLOSEWAIT2) goto again; /* confirm close from peer */ rc = smc_close_final(conn); if (smc_cdc_rxed_any_close(conn)) { /* peer has closed the socket already */ sk->sk_state = SMC_CLOSED; sock_put(sk); /* postponed passive closing */ } else { /* peer has just issued a shutdown write */ sk->sk_state = SMC_PEERFINCLOSEWAIT; } break; case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: if (txflags->peer_done_writing && !smc_close_sent_any_close(conn)) { /* just shutdown wr done, send close request */ rc = smc_close_final(conn); } /* peer sending PeerConnectionClosed will cause transition */ break; case SMC_PEERFINCLOSEWAIT: /* peer sending PeerConnectionClosed will cause transition */ break; case SMC_PROCESSABORT: rc = smc_close_abort(conn); sk->sk_state = SMC_CLOSED; break; case SMC_PEERABORTWAIT: sk->sk_state = SMC_CLOSED; break; case SMC_CLOSED: /* nothing to do, add tracing in future patch */ break; } if (old_state != sk->sk_state) sk->sk_state_change(sk); return rc; } static void smc_close_passive_abort_received(struct smc_sock *smc) { struct smc_cdc_conn_state_flags *txflags = &smc->conn.local_tx_ctrl.conn_state_flags; struct sock *sk = &smc->sk; switch (sk->sk_state) { case SMC_INIT: case SMC_ACTIVE: case SMC_APPCLOSEWAIT1: sk->sk_state = SMC_PROCESSABORT; sock_put(sk); /* passive closing */ break; case SMC_APPFINCLOSEWAIT: sk->sk_state = SMC_PROCESSABORT; break; case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: if (txflags->peer_done_writing && !smc_close_sent_any_close(&smc->conn)) /* just shutdown, but not yet closed locally */ sk->sk_state = SMC_PROCESSABORT; else sk->sk_state = SMC_CLOSED; sock_put(sk); /* passive closing */ break; case SMC_APPCLOSEWAIT2: case SMC_PEERFINCLOSEWAIT: sk->sk_state = SMC_CLOSED; sock_put(sk); /* passive closing */ break; case SMC_PEERABORTWAIT: sk->sk_state = SMC_CLOSED; break; case SMC_PROCESSABORT: /* nothing to do, add tracing in future patch */ break; } } /* Either some kind of closing has been received: peer_conn_closed, * peer_conn_abort, or peer_done_writing * or the link group of the connection terminates abnormally. 
*/ static void smc_close_passive_work(struct work_struct *work) { struct smc_connection *conn = container_of(work, struct smc_connection, close_work); struct smc_sock *smc = container_of(conn, struct smc_sock, conn); struct smc_cdc_conn_state_flags *rxflags; bool release_clcsock = false; struct sock *sk = &smc->sk; int old_state; lock_sock(sk); old_state = sk->sk_state; rxflags = &conn->local_rx_ctrl.conn_state_flags; if (rxflags->peer_conn_abort) { /* peer has not received all data */ smc_close_passive_abort_received(smc); release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); lock_sock(sk); goto wakeup; } switch (sk->sk_state) { case SMC_INIT: sk->sk_state = SMC_APPCLOSEWAIT1; break; case SMC_ACTIVE: sk->sk_state = SMC_APPCLOSEWAIT1; /* postpone sock_put() for passive closing to cover * received SEND_SHUTDOWN as well */ break; case SMC_PEERCLOSEWAIT1: if (rxflags->peer_done_writing) sk->sk_state = SMC_PEERCLOSEWAIT2; fallthrough; /* to check for closing */ case SMC_PEERCLOSEWAIT2: if (!smc_cdc_rxed_any_close(conn)) break; if (sock_flag(sk, SOCK_DEAD) && smc_close_sent_any_close(conn)) { /* smc_release has already been called locally */ sk->sk_state = SMC_CLOSED; } else { /* just shutdown, but not yet closed locally */ sk->sk_state = SMC_APPFINCLOSEWAIT; } sock_put(sk); /* passive closing */ break; case SMC_PEERFINCLOSEWAIT: if (smc_cdc_rxed_any_close(conn)) { sk->sk_state = SMC_CLOSED; sock_put(sk); /* passive closing */ } break; case SMC_APPCLOSEWAIT1: case SMC_APPCLOSEWAIT2: /* postpone sock_put() for passive closing to cover * received SEND_SHUTDOWN as well */ break; case SMC_APPFINCLOSEWAIT: case SMC_PEERABORTWAIT: case SMC_PROCESSABORT: case SMC_CLOSED: /* nothing to do, add tracing in future patch */ break; } wakeup: sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */ sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */ if (old_state != sk->sk_state) { sk->sk_state_change(sk); if ((sk->sk_state == SMC_CLOSED) && (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { smc_conn_free(conn); if (smc->clcsock) release_clcsock = true; } } release_sock(sk); if (release_clcsock) smc_clcsock_release(smc); sock_put(sk); /* sock_hold done by schedulers of close_work */ } int smc_close_shutdown_write(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; struct sock *sk = &smc->sk; int old_state; long timeout; int rc = 0; timeout = current->flags & PF_EXITING ? 0 : sock_flag(sk, SOCK_LINGER) ? sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT; old_state = sk->sk_state; again: switch (sk->sk_state) { case SMC_ACTIVE: smc_close_stream_wait(smc, timeout); release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); lock_sock(sk); if (sk->sk_state != SMC_ACTIVE) goto again; /* send close wr request */ rc = smc_close_wr(conn); sk->sk_state = SMC_PEERCLOSEWAIT1; break; case SMC_APPCLOSEWAIT1: /* passive close */ if (!smc_cdc_rxed_any_close(conn)) smc_close_stream_wait(smc, timeout); release_sock(sk); cancel_delayed_work_sync(&conn->tx_work); lock_sock(sk); if (sk->sk_state != SMC_APPCLOSEWAIT1) goto again; /* confirm close from peer */ rc = smc_close_wr(conn); sk->sk_state = SMC_APPCLOSEWAIT2; break; case SMC_APPCLOSEWAIT2: case SMC_PEERFINCLOSEWAIT: case SMC_PEERCLOSEWAIT1: case SMC_PEERCLOSEWAIT2: case SMC_APPFINCLOSEWAIT: case SMC_PROCESSABORT: case SMC_PEERABORTWAIT: /* nothing to do, add tracing in future patch */ break; } if (old_state != sk->sk_state) sk->sk_state_change(sk); return rc; } /* Initialize close properties on connection establishment. 
 */
void smc_close_init(struct smc_sock *smc)
{
	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
}
linux-master
net/smc/smc_close.c
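The close handling above is driven by a per-socket state machine: smc_close_active(), smc_close_passive_work() and smc_close_shutdown_write() each switch on sk->sk_state and move the socket toward SMC_CLOSED depending on what the peer has already signalled. What follows is a minimal, self-contained userspace sketch of that pattern only; the states are a reduced subset, the transition rules are simplified, and on_peer_close() is an invented stand-in rather than any kernel function.

/* Illustrative sketch of a close state machine in the style of
 * smc_close_passive_work(); reduced state set, simplified rules,
 * demonstration only.
 */
#include <stdio.h>

enum state { ACTIVE, APPCLOSEWAIT, PEERCLOSEWAIT, CLOSED };

static const char *name(enum state s)
{
	switch (s) {
	case ACTIVE:		return "ACTIVE";
	case APPCLOSEWAIT:	return "APPCLOSEWAIT";
	case PEERCLOSEWAIT:	return "PEERCLOSEWAIT";
	default:		return "CLOSED";
	}
}

/* react to "peer sent its close indication" */
static enum state on_peer_close(enum state cur, int local_already_closed)
{
	switch (cur) {
	case ACTIVE:
		return APPCLOSEWAIT;	/* application must still close locally */
	case PEERCLOSEWAIT:
		return local_already_closed ? CLOSED : APPCLOSEWAIT;
	default:
		return cur;		/* no change in the remaining states */
	}
}

int main(void)
{
	enum state s = ACTIVE;

	s = on_peer_close(s, 0);
	printf("after peer close: %s\n", name(s));	/* APPCLOSEWAIT */
	s = on_peer_close(PEERCLOSEWAIT, 1);
	printf("after mutual close: %s\n", name(s));	/* CLOSED */
	return 0;
}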
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Work Requests exploiting Infiniband API * * Work requests (WR) of type ib_post_send or ib_post_recv respectively * are submitted to either RC SQ or RC RQ respectively * (reliably connected send/receive queue) * and become work queue entries (WQEs). * While an SQ WR/WQE is pending, we track it until transmission completion. * Through a send or receive completion queue (CQ) respectively, * we get completion queue entries (CQEs) [aka work completions (WCs)]. * Since the CQ callback is called from IRQ context, we split work by using * bottom halves implemented by tasklets. * * SMC uses this to exchange LLC (link layer control) * and CDC (connection data control) messages. * * Copyright IBM Corp. 2016 * * Author(s): Steffen Maier <[email protected]> */ #include <linux/atomic.h> #include <linux/hashtable.h> #include <linux/wait.h> #include <rdma/ib_verbs.h> #include <asm/div64.h> #include "smc.h" #include "smc_wr.h" #define SMC_WR_MAX_POLL_CQE 10 /* max. # of compl. queue elements in 1 poll */ #define SMC_WR_RX_HASH_BITS 4 static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS); static DEFINE_SPINLOCK(smc_wr_rx_hash_lock); struct smc_wr_tx_pend { /* control data for a pending send request */ u64 wr_id; /* work request id sent */ smc_wr_tx_handler handler; enum ib_wc_status wc_status; /* CQE status */ struct smc_link *link; u32 idx; struct smc_wr_tx_pend_priv priv; u8 compl_requested; }; /******************************** send queue *********************************/ /*------------------------------- completion --------------------------------*/ /* returns true if at least one tx work request is pending on the given link */ static inline bool smc_wr_is_tx_pend(struct smc_link *link) { return !bitmap_empty(link->wr_tx_mask, link->wr_tx_cnt); } /* wait till all pending tx work requests on the given link are completed */ void smc_wr_tx_wait_no_pending_sends(struct smc_link *link) { wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link)); } static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id) { u32 i; for (i = 0; i < link->wr_tx_cnt; i++) { if (link->wr_tx_pends[i].wr_id == wr_id) return i; } return link->wr_tx_cnt; } static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) { struct smc_wr_tx_pend pnd_snd; struct smc_link *link; u32 pnd_snd_idx; link = wc->qp->qp_context; if (wc->opcode == IB_WC_REG_MR) { if (wc->status) link->wr_reg_state = FAILED; else link->wr_reg_state = CONFIRMED; smc_wr_wakeup_reg_wait(link); return; } pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); if (pnd_snd_idx == link->wr_tx_cnt) { if (link->lgr->smc_version != SMC_V2 || link->wr_tx_v2_pend->wr_id != wc->wr_id) return; link->wr_tx_v2_pend->wc_status = wc->status; memcpy(&pnd_snd, link->wr_tx_v2_pend, sizeof(pnd_snd)); /* clear the full struct smc_wr_tx_pend including .priv */ memset(link->wr_tx_v2_pend, 0, sizeof(*link->wr_tx_v2_pend)); memset(link->lgr->wr_tx_buf_v2, 0, sizeof(*link->lgr->wr_tx_buf_v2)); } else { link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; if (link->wr_tx_pends[pnd_snd_idx].compl_requested) complete(&link->wr_tx_compl[pnd_snd_idx]); memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd)); /* clear the full struct smc_wr_tx_pend including .priv */ memset(&link->wr_tx_pends[pnd_snd_idx], 0, sizeof(link->wr_tx_pends[pnd_snd_idx])); memset(&link->wr_tx_bufs[pnd_snd_idx], 0, sizeof(link->wr_tx_bufs[pnd_snd_idx])); if 
(!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) return; } if (wc->status) { if (link->lgr->smc_version == SMC_V2) { memset(link->wr_tx_v2_pend, 0, sizeof(*link->wr_tx_v2_pend)); memset(link->lgr->wr_tx_buf_v2, 0, sizeof(*link->lgr->wr_tx_buf_v2)); } /* terminate link */ smcr_link_down_cond_sched(link); } if (pnd_snd.handler) pnd_snd.handler(&pnd_snd.priv, link, wc->status); wake_up(&link->wr_tx_wait); } static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t) { struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet); struct ib_wc wc[SMC_WR_MAX_POLL_CQE]; int i = 0, rc; int polled = 0; again: polled++; do { memset(&wc, 0, sizeof(wc)); rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc); if (polled == 1) { ib_req_notify_cq(dev->roce_cq_send, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); } if (!rc) break; for (i = 0; i < rc; i++) smc_wr_tx_process_cqe(&wc[i]); } while (rc > 0); if (polled == 1) goto again; } void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context) { struct smc_ib_device *dev = (struct smc_ib_device *)cq_context; tasklet_schedule(&dev->send_tasklet); } /*---------------------------- request submission ---------------------------*/ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) { *idx = link->wr_tx_cnt; if (!smc_link_sendable(link)) return -ENOLINK; for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { if (!test_and_set_bit(*idx, link->wr_tx_mask)) return 0; } *idx = link->wr_tx_cnt; return -EBUSY; } /** * smc_wr_tx_get_free_slot() - returns buffer for message assembly, * and sets info for pending transmit tracking * @link: Pointer to smc_link used to later send the message. * @handler: Send completion handler function pointer. * @wr_buf: Out value returns pointer to message buffer. * @wr_rdma_buf: Out value returns pointer to rdma work request. * @wr_pend_priv: Out value returns pointer serving as handler context. * * Return: 0 on success, or -errno on error. 
*/ int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, struct smc_wr_buf **wr_buf, struct smc_rdma_wr **wr_rdma_buf, struct smc_wr_tx_pend_priv **wr_pend_priv) { struct smc_link_group *lgr = smc_get_lgr(link); struct smc_wr_tx_pend *wr_pend; u32 idx = link->wr_tx_cnt; struct ib_send_wr *wr_ib; u64 wr_id; int rc; *wr_buf = NULL; *wr_pend_priv = NULL; if (in_softirq() || lgr->terminating) { rc = smc_wr_tx_get_free_slot_index(link, &idx); if (rc) return rc; } else { rc = wait_event_interruptible_timeout( link->wr_tx_wait, !smc_link_sendable(link) || lgr->terminating || (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), SMC_WR_TX_WAIT_FREE_SLOT_TIME); if (!rc) { /* timeout - terminate link */ smcr_link_down_cond_sched(link); return -EPIPE; } if (idx == link->wr_tx_cnt) return -EPIPE; } wr_id = smc_wr_tx_get_next_wr_id(link); wr_pend = &link->wr_tx_pends[idx]; wr_pend->wr_id = wr_id; wr_pend->handler = handler; wr_pend->link = link; wr_pend->idx = idx; wr_ib = &link->wr_tx_ibs[idx]; wr_ib->wr_id = wr_id; *wr_buf = &link->wr_tx_bufs[idx]; if (wr_rdma_buf) *wr_rdma_buf = &link->wr_tx_rdmas[idx]; *wr_pend_priv = &wr_pend->priv; return 0; } int smc_wr_tx_get_v2_slot(struct smc_link *link, smc_wr_tx_handler handler, struct smc_wr_v2_buf **wr_buf, struct smc_wr_tx_pend_priv **wr_pend_priv) { struct smc_wr_tx_pend *wr_pend; struct ib_send_wr *wr_ib; u64 wr_id; if (link->wr_tx_v2_pend->idx == link->wr_tx_cnt) return -EBUSY; *wr_buf = NULL; *wr_pend_priv = NULL; wr_id = smc_wr_tx_get_next_wr_id(link); wr_pend = link->wr_tx_v2_pend; wr_pend->wr_id = wr_id; wr_pend->handler = handler; wr_pend->link = link; wr_pend->idx = link->wr_tx_cnt; wr_ib = link->wr_tx_v2_ib; wr_ib->wr_id = wr_id; *wr_buf = link->lgr->wr_tx_buf_v2; *wr_pend_priv = &wr_pend->priv; return 0; } int smc_wr_tx_put_slot(struct smc_link *link, struct smc_wr_tx_pend_priv *wr_pend_priv) { struct smc_wr_tx_pend *pend; pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); if (pend->idx < link->wr_tx_cnt) { u32 idx = pend->idx; /* clear the full struct smc_wr_tx_pend including .priv */ memset(&link->wr_tx_pends[idx], 0, sizeof(link->wr_tx_pends[idx])); memset(&link->wr_tx_bufs[idx], 0, sizeof(link->wr_tx_bufs[idx])); test_and_clear_bit(idx, link->wr_tx_mask); wake_up(&link->wr_tx_wait); return 1; } else if (link->lgr->smc_version == SMC_V2 && pend->idx == link->wr_tx_cnt) { /* Large v2 buffer */ memset(&link->wr_tx_v2_pend, 0, sizeof(link->wr_tx_v2_pend)); memset(&link->lgr->wr_tx_buf_v2, 0, sizeof(link->lgr->wr_tx_buf_v2)); return 1; } return 0; } /* Send prepared WR slot via ib_post_send. 
* @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer */ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) { struct smc_wr_tx_pend *pend; int rc; ib_req_notify_cq(link->smcibdev->roce_cq_send, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); pend = container_of(priv, struct smc_wr_tx_pend, priv); rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL); if (rc) { smc_wr_tx_put_slot(link, priv); smcr_link_down_cond_sched(link); } return rc; } int smc_wr_tx_v2_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, int len) { int rc; link->wr_tx_v2_ib->sg_list[0].length = len; ib_req_notify_cq(link->smcibdev->roce_cq_send, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); rc = ib_post_send(link->roce_qp, link->wr_tx_v2_ib, NULL); if (rc) { smc_wr_tx_put_slot(link, priv); smcr_link_down_cond_sched(link); } return rc; } /* Send prepared WR slot via ib_post_send and wait for send completion * notification. * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer */ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, unsigned long timeout) { struct smc_wr_tx_pend *pend; u32 pnd_idx; int rc; pend = container_of(priv, struct smc_wr_tx_pend, priv); pend->compl_requested = 1; pnd_idx = pend->idx; init_completion(&link->wr_tx_compl[pnd_idx]); rc = smc_wr_tx_send(link, priv); if (rc) return rc; /* wait for completion by smc_wr_tx_process_cqe() */ rc = wait_for_completion_interruptible_timeout( &link->wr_tx_compl[pnd_idx], timeout); if (rc <= 0) rc = -ENODATA; if (rc > 0) rc = 0; return rc; } /* Register a memory region and wait for result. */ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) { int rc; ib_req_notify_cq(link->smcibdev->roce_cq_send, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); link->wr_reg_state = POSTED; link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr; link->wr_reg.mr = mr; link->wr_reg.key = mr->rkey; rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL); if (rc) return rc; percpu_ref_get(&link->wr_reg_refs); rc = wait_event_interruptible_timeout(link->wr_reg_wait, (link->wr_reg_state != POSTED), SMC_WR_REG_MR_WAIT_TIME); percpu_ref_put(&link->wr_reg_refs); if (!rc) { /* timeout - terminate link */ smcr_link_down_cond_sched(link); return -EPIPE; } if (rc == -ERESTARTSYS) return -EINTR; switch (link->wr_reg_state) { case CONFIRMED: rc = 0; break; case FAILED: rc = -EIO; break; case POSTED: rc = -EPIPE; break; } return rc; } /****************************** receive queue ********************************/ int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler) { struct smc_wr_rx_handler *h_iter; int rc = 0; spin_lock(&smc_wr_rx_hash_lock); hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) { if (h_iter->type == handler->type) { rc = -EEXIST; goto out_unlock; } } hash_add(smc_wr_rx_hash, &handler->list, handler->type); out_unlock: spin_unlock(&smc_wr_rx_hash_lock); return rc; } /* Demultiplex a received work request based on the message type to its handler. * Relies on smc_wr_rx_hash having been completely filled before any IB WRs, * and not being modified any more afterwards so we don't need to lock it. 
*/ static inline void smc_wr_rx_demultiplex(struct ib_wc *wc) { struct smc_link *link = (struct smc_link *)wc->qp->qp_context; struct smc_wr_rx_handler *handler; struct smc_wr_rx_hdr *wr_rx; u64 temp_wr_id; u32 index; if (wc->byte_len < sizeof(*wr_rx)) return; /* short message */ temp_wr_id = wc->wr_id; index = do_div(temp_wr_id, link->wr_rx_cnt); wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index]; hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) { if (handler->type == wr_rx->type) handler->handler(wc, wr_rx); } } static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) { struct smc_link *link; int i; for (i = 0; i < num; i++) { link = wc[i].qp->qp_context; link->wr_rx_id_compl = wc[i].wr_id; if (wc[i].status == IB_WC_SUCCESS) { link->wr_rx_tstamp = jiffies; smc_wr_rx_demultiplex(&wc[i]); smc_wr_rx_post(link); /* refill WR RX */ } else { /* handle status errors */ switch (wc[i].status) { case IB_WC_RETRY_EXC_ERR: case IB_WC_RNR_RETRY_EXC_ERR: case IB_WC_WR_FLUSH_ERR: smcr_link_down_cond_sched(link); if (link->wr_rx_id_compl == link->wr_rx_id) wake_up(&link->wr_rx_empty_wait); break; default: smc_wr_rx_post(link); /* refill WR RX */ break; } } } } static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t) { struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet); struct ib_wc wc[SMC_WR_MAX_POLL_CQE]; int polled = 0; int rc; again: polled++; do { memset(&wc, 0, sizeof(wc)); rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc); if (polled == 1) { ib_req_notify_cq(dev->roce_cq_recv, IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS); } if (!rc) break; smc_wr_rx_process_cqes(&wc[0], rc); } while (rc > 0); if (polled == 1) goto again; } void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context) { struct smc_ib_device *dev = (struct smc_ib_device *)cq_context; tasklet_schedule(&dev->recv_tasklet); } int smc_wr_rx_post_init(struct smc_link *link) { u32 i; int rc = 0; for (i = 0; i < link->wr_rx_cnt; i++) rc = smc_wr_rx_post(link); return rc; } /***************************** init, exit, misc ******************************/ void smc_wr_remember_qp_attr(struct smc_link *lnk) { struct ib_qp_attr *attr = &lnk->qp_attr; struct ib_qp_init_attr init_attr; memset(attr, 0, sizeof(*attr)); memset(&init_attr, 0, sizeof(init_attr)); ib_query_qp(lnk->roce_qp, attr, IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_RQ_PSN | IB_QP_ALT_PATH | IB_QP_MIN_RNR_TIMER | IB_QP_SQ_PSN | IB_QP_PATH_MIG_STATE | IB_QP_CAP | IB_QP_DEST_QPN, &init_attr); lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT, lnk->qp_attr.cap.max_send_wr); lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3, lnk->qp_attr.cap.max_recv_wr); } static void smc_wr_init_sge(struct smc_link *lnk) { int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1; bool send_inline = (lnk->qp_attr.cap.max_inline_data > SMC_WR_TX_SIZE); u32 i; for (i = 0; i < lnk->wr_tx_cnt; i++) { lnk->wr_tx_sges[i].addr = send_inline ? 
(uintptr_t)(&lnk->wr_tx_bufs[i]) : lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_ibs[i].next = NULL; lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; lnk->wr_tx_ibs[i].num_sge = 1; lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; lnk->wr_tx_ibs[i].send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; if (send_inline) lnk->wr_tx_ibs[i].send_flags |= IB_SEND_INLINE; lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; } if (lnk->lgr->smc_version == SMC_V2) { lnk->wr_tx_v2_sge->addr = lnk->wr_tx_v2_dma_addr; lnk->wr_tx_v2_sge->length = SMC_WR_BUF_V2_SIZE; lnk->wr_tx_v2_sge->lkey = lnk->roce_pd->local_dma_lkey; lnk->wr_tx_v2_ib->next = NULL; lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge; lnk->wr_tx_v2_ib->num_sge = 1; lnk->wr_tx_v2_ib->opcode = IB_WR_SEND; lnk->wr_tx_v2_ib->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; } /* With SMC-Rv2 there can be messages larger than SMC_WR_TX_SIZE. * Each ib_recv_wr gets 2 sges, the second one is a spillover buffer * and the same buffer for all sges. When a larger message arrived then * the content of the first small sge is copied to the beginning of * the larger spillover buffer, allowing easy data mapping. 
*/ for (i = 0; i < lnk->wr_rx_cnt; i++) { int x = i * sges_per_buf; lnk->wr_rx_sges[x].addr = lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE; lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey; if (lnk->lgr->smc_version == SMC_V2) { lnk->wr_rx_sges[x + 1].addr = lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE; lnk->wr_rx_sges[x + 1].length = SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE; lnk->wr_rx_sges[x + 1].lkey = lnk->roce_pd->local_dma_lkey; } lnk->wr_rx_ibs[i].next = NULL; lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x]; lnk->wr_rx_ibs[i].num_sge = sges_per_buf; } lnk->wr_reg.wr.next = NULL; lnk->wr_reg.wr.num_sge = 0; lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED; lnk->wr_reg.wr.opcode = IB_WR_REG_MR; lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; } void smc_wr_free_link(struct smc_link *lnk) { struct ib_device *ibdev; if (!lnk->smcibdev) return; ibdev = lnk->smcibdev->ibdev; smc_wr_drain_cq(lnk); smc_wr_wakeup_reg_wait(lnk); smc_wr_wakeup_tx_wait(lnk); smc_wr_tx_wait_no_pending_sends(lnk); percpu_ref_kill(&lnk->wr_reg_refs); wait_for_completion(&lnk->reg_ref_comp); percpu_ref_kill(&lnk->wr_tx_refs); wait_for_completion(&lnk->tx_ref_comp); if (lnk->wr_rx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, DMA_FROM_DEVICE); lnk->wr_rx_dma_addr = 0; } if (lnk->wr_rx_v2_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr, SMC_WR_BUF_V2_SIZE, DMA_FROM_DEVICE); lnk->wr_rx_v2_dma_addr = 0; } if (lnk->wr_tx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, DMA_TO_DEVICE); lnk->wr_tx_dma_addr = 0; } if (lnk->wr_tx_v2_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr, SMC_WR_BUF_V2_SIZE, DMA_TO_DEVICE); lnk->wr_tx_v2_dma_addr = 0; } } void smc_wr_free_lgr_mem(struct smc_link_group *lgr) { if (lgr->smc_version < SMC_V2) return; kfree(lgr->wr_rx_buf_v2); lgr->wr_rx_buf_v2 = NULL; kfree(lgr->wr_tx_buf_v2); lgr->wr_tx_buf_v2 = NULL; } void smc_wr_free_link_mem(struct smc_link *lnk) { kfree(lnk->wr_tx_v2_ib); lnk->wr_tx_v2_ib = NULL; kfree(lnk->wr_tx_v2_sge); lnk->wr_tx_v2_sge = NULL; kfree(lnk->wr_tx_v2_pend); lnk->wr_tx_v2_pend = NULL; kfree(lnk->wr_tx_compl); lnk->wr_tx_compl = NULL; kfree(lnk->wr_tx_pends); lnk->wr_tx_pends = NULL; bitmap_free(lnk->wr_tx_mask); lnk->wr_tx_mask = NULL; kfree(lnk->wr_tx_sges); lnk->wr_tx_sges = NULL; kfree(lnk->wr_tx_rdma_sges); lnk->wr_tx_rdma_sges = NULL; kfree(lnk->wr_rx_sges); lnk->wr_rx_sges = NULL; kfree(lnk->wr_tx_rdmas); lnk->wr_tx_rdmas = NULL; kfree(lnk->wr_rx_ibs); lnk->wr_rx_ibs = NULL; kfree(lnk->wr_tx_ibs); lnk->wr_tx_ibs = NULL; kfree(lnk->wr_tx_bufs); lnk->wr_tx_bufs = NULL; kfree(lnk->wr_rx_bufs); lnk->wr_rx_bufs = NULL; } int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr) { if (lgr->smc_version < SMC_V2) return 0; lgr->wr_rx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL); if (!lgr->wr_rx_buf_v2) return -ENOMEM; lgr->wr_tx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL); if (!lgr->wr_tx_buf_v2) { kfree(lgr->wr_rx_buf_v2); return -ENOMEM; } return 0; } int smc_wr_alloc_link_mem(struct smc_link *link) { int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 
2 : 1; /* allocate link related memory */ link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_tx_bufs) goto no_mem; link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_rx_bufs) goto no_mem_wr_tx_bufs; link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]), GFP_KERNEL); if (!link->wr_tx_ibs) goto no_mem_wr_rx_bufs; link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3, sizeof(link->wr_rx_ibs[0]), GFP_KERNEL); if (!link->wr_rx_ibs) goto no_mem_wr_tx_ibs; link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_rdmas[0]), GFP_KERNEL); if (!link->wr_tx_rdmas) goto no_mem_wr_rx_ibs; link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_rdma_sges[0]), GFP_KERNEL); if (!link->wr_tx_rdma_sges) goto no_mem_wr_tx_rdmas; link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), GFP_KERNEL); if (!link->wr_tx_sges) goto no_mem_wr_tx_rdma_sges; link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, sizeof(link->wr_rx_sges[0]) * sges_per_buf, GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL); if (!link->wr_tx_mask) goto no_mem_wr_rx_sges; link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_pends[0]), GFP_KERNEL); if (!link->wr_tx_pends) goto no_mem_wr_tx_mask; link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_compl[0]), GFP_KERNEL); if (!link->wr_tx_compl) goto no_mem_wr_tx_pends; if (link->lgr->smc_version == SMC_V2) { link->wr_tx_v2_ib = kzalloc(sizeof(*link->wr_tx_v2_ib), GFP_KERNEL); if (!link->wr_tx_v2_ib) goto no_mem_tx_compl; link->wr_tx_v2_sge = kzalloc(sizeof(*link->wr_tx_v2_sge), GFP_KERNEL); if (!link->wr_tx_v2_sge) goto no_mem_v2_ib; link->wr_tx_v2_pend = kzalloc(sizeof(*link->wr_tx_v2_pend), GFP_KERNEL); if (!link->wr_tx_v2_pend) goto no_mem_v2_sge; } return 0; no_mem_v2_sge: kfree(link->wr_tx_v2_sge); no_mem_v2_ib: kfree(link->wr_tx_v2_ib); no_mem_tx_compl: kfree(link->wr_tx_compl); no_mem_wr_tx_pends: kfree(link->wr_tx_pends); no_mem_wr_tx_mask: kfree(link->wr_tx_mask); no_mem_wr_rx_sges: kfree(link->wr_rx_sges); no_mem_wr_tx_sges: kfree(link->wr_tx_sges); no_mem_wr_tx_rdma_sges: kfree(link->wr_tx_rdma_sges); no_mem_wr_tx_rdmas: kfree(link->wr_tx_rdmas); no_mem_wr_rx_ibs: kfree(link->wr_rx_ibs); no_mem_wr_tx_ibs: kfree(link->wr_tx_ibs); no_mem_wr_rx_bufs: kfree(link->wr_rx_bufs); no_mem_wr_tx_bufs: kfree(link->wr_tx_bufs); no_mem: return -ENOMEM; } void smc_wr_remove_dev(struct smc_ib_device *smcibdev) { tasklet_kill(&smcibdev->recv_tasklet); tasklet_kill(&smcibdev->send_tasklet); } void smc_wr_add_dev(struct smc_ib_device *smcibdev) { tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn); tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn); } static void smcr_wr_tx_refs_free(struct percpu_ref *ref) { struct smc_link *lnk = container_of(ref, struct smc_link, wr_tx_refs); complete(&lnk->tx_ref_comp); } static void smcr_wr_reg_refs_free(struct percpu_ref *ref) { struct smc_link *lnk = container_of(ref, struct smc_link, wr_reg_refs); complete(&lnk->reg_ref_comp); } int smc_wr_create_link(struct smc_link *lnk) { struct ib_device *ibdev = lnk->smcibdev->ibdev; int rc = 0; smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0); lnk->wr_rx_id = 0; lnk->wr_rx_dma_addr = ib_dma_map_single( ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) { lnk->wr_rx_dma_addr = 0; rc = -EIO; goto out; } if (lnk->lgr->smc_version == SMC_V2) 
{ lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev, lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) { lnk->wr_rx_v2_dma_addr = 0; rc = -EIO; goto dma_unmap; } lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev, lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE, DMA_TO_DEVICE); if (ib_dma_mapping_error(ibdev, lnk->wr_tx_v2_dma_addr)) { lnk->wr_tx_v2_dma_addr = 0; rc = -EIO; goto dma_unmap; } } lnk->wr_tx_dma_addr = ib_dma_map_single( ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, DMA_TO_DEVICE); if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) { rc = -EIO; goto dma_unmap; } smc_wr_init_sge(lnk); bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT); init_waitqueue_head(&lnk->wr_tx_wait); rc = percpu_ref_init(&lnk->wr_tx_refs, smcr_wr_tx_refs_free, 0, GFP_KERNEL); if (rc) goto dma_unmap; init_completion(&lnk->tx_ref_comp); init_waitqueue_head(&lnk->wr_reg_wait); rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL); if (rc) goto dma_unmap; init_completion(&lnk->reg_ref_comp); init_waitqueue_head(&lnk->wr_rx_empty_wait); return rc; dma_unmap: if (lnk->wr_rx_v2_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr, SMC_WR_BUF_V2_SIZE, DMA_FROM_DEVICE); lnk->wr_rx_v2_dma_addr = 0; } if (lnk->wr_tx_v2_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr, SMC_WR_BUF_V2_SIZE, DMA_TO_DEVICE); lnk->wr_tx_v2_dma_addr = 0; } ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, DMA_FROM_DEVICE); lnk->wr_rx_dma_addr = 0; out: return rc; }
linux-master
net/smc/smc_wr.c
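smc_wr_tx_get_free_slot_index() claims a send slot by scanning wr_tx_mask with for_each_clear_bit() and keeping the first bit it wins with test_and_set_bit(), so concurrent callers can never be handed the same slot. Below is a small userspace sketch of that claim/release idea under stated assumptions: GCC __atomic builtins stand in for the kernel bit operations, and SLOT_CNT, claim_slot() and release_slot() are illustrative names, not kernel API.

/* Standalone sketch of "claim a free send slot from a bitmap" as done by
 * smc_wr_tx_get_free_slot_index(); atomic builtins replace the kernel's
 * test_and_set_bit()/for_each_clear_bit().
 */
#include <stdio.h>

#define SLOT_CNT 16

static unsigned long busy_mask;	/* bit i set => slot i in use */

/* Returns a claimed slot index, or -1 if all slots are busy. */
static int claim_slot(void)
{
	for (int i = 0; i < SLOT_CNT; i++) {
		unsigned long bit = 1UL << i;

		/* skip slots that already look busy */
		if (__atomic_load_n(&busy_mask, __ATOMIC_RELAXED) & bit)
			continue;
		/* atomically set the bit; if it was clear, the slot is ours */
		if (!(__atomic_fetch_or(&busy_mask, bit, __ATOMIC_ACQ_REL) & bit))
			return i;
	}
	return -1;
}

static void release_slot(int i)
{
	__atomic_fetch_and(&busy_mask, ~(1UL << i), __ATOMIC_RELEASE);
}

int main(void)
{
	int a = claim_slot();
	int b = claim_slot();

	printf("claimed slots %d and %d\n", a, b);	/* 0 and 1 */
	release_slot(a);
	printf("reclaimed slot %d\n", claim_slot());	/* 0 again */
	return 0;
}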
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Connection Data Control (CDC) * handles flow control * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <[email protected]> */ #include <linux/spinlock.h> #include "smc.h" #include "smc_wr.h" #include "smc_cdc.h" #include "smc_tx.h" #include "smc_rx.h" #include "smc_close.h" /********************************** send *************************************/ /* handler for send/transmission completion of a CDC msg */ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, struct smc_link *link, enum ib_wc_status wc_status) { struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd; struct smc_connection *conn = cdcpend->conn; struct smc_sock *smc; int diff; smc = container_of(conn, struct smc_sock, conn); bh_lock_sock(&smc->sk); if (!wc_status) { diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len, &cdcpend->conn->tx_curs_fin, &cdcpend->cursor); /* sndbuf_space is decreased in smc_sendmsg */ smp_mb__before_atomic(); atomic_add(diff, &cdcpend->conn->sndbuf_space); /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ smp_mb__after_atomic(); smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn); smc_curs_copy(&conn->local_tx_ctrl_fin, &cdcpend->p_cursor, conn); conn->tx_cdc_seq_fin = cdcpend->ctrl_seq; } if (atomic_dec_and_test(&conn->cdc_pend_tx_wr)) { /* If user owns the sock_lock, mark the connection need sending. * User context will later try to send when it release sock_lock * in smc_release_cb() */ if (sock_owned_by_user(&smc->sk)) conn->tx_in_release_sock = true; else smc_tx_pending(conn); if (unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq))) wake_up(&conn->cdc_pend_tx_wq); } WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0); smc_tx_sndbuf_nonfull(smc); bh_unlock_sock(&smc->sk); } int smc_cdc_get_free_slot(struct smc_connection *conn, struct smc_link *link, struct smc_wr_buf **wr_buf, struct smc_rdma_wr **wr_rdma_buf, struct smc_cdc_tx_pend **pend) { int rc; rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, wr_rdma_buf, (struct smc_wr_tx_pend_priv **)pend); if (conn->killed) { /* abnormal termination */ if (!rc) smc_wr_tx_put_slot(link, (struct smc_wr_tx_pend_priv *)(*pend)); rc = -EPIPE; } return rc; } static inline void smc_cdc_add_pending_send(struct smc_connection *conn, struct smc_cdc_tx_pend *pend) { BUILD_BUG_ON_MSG( sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); BUILD_BUG_ON_MSG( offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE, "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); BUILD_BUG_ON_MSG( sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)"); pend->conn = conn; pend->cursor = conn->tx_curs_sent; pend->p_cursor = conn->local_tx_ctrl.prod; pend->ctrl_seq = conn->tx_cdc_seq; } int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, struct smc_cdc_tx_pend *pend) { struct smc_link *link = conn->lnk; union smc_host_cursor cfed; int rc; if (unlikely(!READ_ONCE(conn->sndbuf_desc))) return -ENOBUFS; smc_cdc_add_pending_send(conn, pend); conn->tx_cdc_seq++; conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); 
atomic_inc(&conn->cdc_pend_tx_wr); smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); if (!rc) { smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0; } else { conn->tx_cdc_seq--; conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; atomic_dec(&conn->cdc_pend_tx_wr); } return rc; } /* send a validation msg indicating the move of a conn to an other QP link */ int smcr_cdc_msg_send_validation(struct smc_connection *conn, struct smc_cdc_tx_pend *pend, struct smc_wr_buf *wr_buf) { struct smc_host_cdc_msg *local = &conn->local_tx_ctrl; struct smc_link *link = conn->lnk; struct smc_cdc_msg *peer; int rc; peer = (struct smc_cdc_msg *)wr_buf; peer->common.type = local->common.type; peer->len = local->len; peer->seqno = htons(conn->tx_cdc_seq_fin); /* seqno last compl. tx */ peer->token = htonl(local->token); peer->prod_flags.failover_validation = 1; /* We need to set pend->conn here to make sure smc_cdc_tx_handler() * can handle properly */ smc_cdc_add_pending_send(conn, pend); atomic_inc(&conn->cdc_pend_tx_wr); smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */ rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); if (unlikely(rc)) atomic_dec(&conn->cdc_pend_tx_wr); return rc; } static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) { struct smc_cdc_tx_pend *pend; struct smc_wr_buf *wr_buf; struct smc_link *link; bool again = false; int rc; again: link = conn->lnk; if (!smc_wr_tx_link_hold(link)) return -ENOLINK; rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend); if (rc) goto put_out; spin_lock_bh(&conn->send_lock); if (link != conn->lnk) { /* link of connection changed, try again one time*/ spin_unlock_bh(&conn->send_lock); smc_wr_tx_put_slot(link, (struct smc_wr_tx_pend_priv *)pend); smc_wr_tx_link_put(link); if (again) return -ENOLINK; again = true; goto again; } rc = smc_cdc_msg_send(conn, wr_buf, pend); spin_unlock_bh(&conn->send_lock); put_out: smc_wr_tx_link_put(link); return rc; } int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) { int rc; if (!smc_conn_lgr_valid(conn) || (conn->lgr->is_smcd && conn->lgr->peer_shutdown)) return -EPIPE; if (conn->lgr->is_smcd) { spin_lock_bh(&conn->send_lock); rc = smcd_cdc_msg_send(conn); spin_unlock_bh(&conn->send_lock); } else { rc = smcr_cdc_get_slot_and_msg_send(conn); } return rc; } void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn) { wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr)); } /* Send a SMC-D CDC header. * This increments the free space available in our send buffer. * Also update the confirmed receive buffer with what was sent to the peer. 
*/ int smcd_cdc_msg_send(struct smc_connection *conn) { struct smc_sock *smc = container_of(conn, struct smc_sock, conn); union smc_host_cursor curs; struct smcd_cdc_msg cdc; int rc, diff; memset(&cdc, 0, sizeof(cdc)); cdc.common.type = SMC_CDC_MSG_TYPE; curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs); cdc.prod.wrap = curs.wrap; cdc.prod.count = curs.count; curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs); cdc.cons.wrap = curs.wrap; cdc.cons.count = curs.count; cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags; cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); if (rc) return rc; smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn); conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0; /* Calculate transmitted data and increment free send buffer space */ diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, &conn->tx_curs_sent); /* increased by confirmed number of bytes */ smp_mb__before_atomic(); atomic_add(diff, &conn->sndbuf_space); /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */ smp_mb__after_atomic(); smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn); smc_tx_sndbuf_nonfull(smc); return rc; } /********************************* receive ***********************************/ static inline bool smc_cdc_before(u16 seq1, u16 seq2) { return (s16)(seq1 - seq2) < 0; } static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc, int *diff_prod) { struct smc_connection *conn = &smc->conn; char *base; /* new data included urgent business */ smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn); conn->urg_state = SMC_URG_VALID; if (!sock_flag(&smc->sk, SOCK_URGINLINE)) /* we'll skip the urgent byte, so don't account for it */ (*diff_prod)--; base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off; if (conn->urg_curs.count) conn->urg_rx_byte = *(base + conn->urg_curs.count - 1); else conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1); sk_send_sigurg(&smc->sk); } static void smc_cdc_msg_validate(struct smc_sock *smc, struct smc_cdc_msg *cdc, struct smc_link *link) { struct smc_connection *conn = &smc->conn; u16 recv_seq = ntohs(cdc->seqno); s16 diff; /* check that seqnum was seen before */ diff = conn->local_rx_ctrl.seqno - recv_seq; if (diff < 0) { /* diff larger than 0x7fff */ /* drop connection */ conn->out_of_sync = 1; /* prevent any further receives */ spin_lock_bh(&conn->send_lock); conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; conn->lnk = link; spin_unlock_bh(&conn->send_lock); sock_hold(&smc->sk); /* sock_put in abort_work */ if (!queue_work(smc_close_wq, &conn->abort_work)) sock_put(&smc->sk); } } static void smc_cdc_msg_recv_action(struct smc_sock *smc, struct smc_cdc_msg *cdc) { union smc_host_cursor cons_old, prod_old; struct smc_connection *conn = &smc->conn; int diff_cons, diff_prod; smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn); smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn); smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn); diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old, &conn->local_rx_ctrl.cons); if (diff_cons) { /* peer_rmbe_space is decreased during data transfer with RDMA * write */ smp_mb__before_atomic(); atomic_add(diff_cons, &conn->peer_rmbe_space); /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */ smp_mb__after_atomic(); } diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old, &conn->local_rx_ctrl.prod); if (diff_prod) { if 
(conn->local_rx_ctrl.prod_flags.urg_data_present) smc_cdc_handle_urg_data_arrival(smc, &diff_prod); /* bytes_to_rcv is decreased in smc_recvmsg */ smp_mb__before_atomic(); atomic_add(diff_prod, &conn->bytes_to_rcv); /* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */ smp_mb__after_atomic(); smc->sk.sk_data_ready(&smc->sk); } else { if (conn->local_rx_ctrl.prod_flags.write_blocked) smc->sk.sk_data_ready(&smc->sk); if (conn->local_rx_ctrl.prod_flags.urg_data_pending) conn->urg_state = SMC_URG_NOTYET; } /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */ if ((diff_cons && smc_tx_prepared_sends(conn)) || conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || conn->local_rx_ctrl.prod_flags.urg_data_pending) { if (!sock_owned_by_user(&smc->sk)) smc_tx_pending(conn); else conn->tx_in_release_sock = true; } if (diff_cons && conn->urg_tx_pend && atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) { /* urg data confirmed by peer, indicate we're ready for more */ conn->urg_tx_pend = false; smc->sk.sk_write_space(&smc->sk); } if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) { smc->sk.sk_err = ECONNRESET; conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; } if (smc_cdc_rxed_any_close_or_senddone(conn)) { smc->sk.sk_shutdown |= RCV_SHUTDOWN; if (smc->clcsock && smc->clcsock->sk) smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN; sock_set_flag(&smc->sk, SOCK_DONE); sock_hold(&smc->sk); /* sock_put in close_work */ if (!queue_work(smc_close_wq, &conn->close_work)) sock_put(&smc->sk); } } /* called under tasklet context */ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc) { sock_hold(&smc->sk); bh_lock_sock(&smc->sk); smc_cdc_msg_recv_action(smc, cdc); bh_unlock_sock(&smc->sk); sock_put(&smc->sk); /* no free sk in softirq-context */ } /* Schedule a tasklet for this connection. Triggered from the ISM device IRQ * handler to indicate update in the DMBE. * * Context: * - tasklet context */ static void smcd_cdc_rx_tsklet(struct tasklet_struct *t) { struct smc_connection *conn = from_tasklet(conn, t, rx_tsklet); struct smcd_cdc_msg *data_cdc; struct smcd_cdc_msg cdc; struct smc_sock *smc; if (!conn || conn->killed) return; data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr; smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn); smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn); smc = container_of(conn, struct smc_sock, conn); smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); } /* Initialize receive tasklet. Called from ISM device IRQ handler to start * receiver side. 
*/ void smcd_cdc_rx_init(struct smc_connection *conn) { tasklet_setup(&conn->rx_tsklet, smcd_cdc_rx_tsklet); } /***************************** init, exit, misc ******************************/ static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf) { struct smc_link *link = (struct smc_link *)wc->qp->qp_context; struct smc_cdc_msg *cdc = buf; struct smc_connection *conn; struct smc_link_group *lgr; struct smc_sock *smc; if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved)) return; /* short message */ if (cdc->len != SMC_WR_TX_SIZE) return; /* invalid message */ /* lookup connection */ lgr = smc_get_lgr(link); read_lock_bh(&lgr->conns_lock); conn = smc_lgr_find_conn(ntohl(cdc->token), lgr); read_unlock_bh(&lgr->conns_lock); if (!conn || conn->out_of_sync) return; smc = container_of(conn, struct smc_sock, conn); if (cdc->prod_flags.failover_validation) { smc_cdc_msg_validate(smc, cdc, link); return; } if (smc_cdc_before(ntohs(cdc->seqno), conn->local_rx_ctrl.seqno)) /* received seqno is old */ return; smc_cdc_msg_recv(smc, cdc); } static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = { { .handler = smc_cdc_rx_handler, .type = SMC_CDC_MSG_TYPE }, { .handler = NULL, } }; int __init smc_cdc_init(void) { struct smc_wr_rx_handler *handler; int rc = 0; for (handler = smc_cdc_rx_handlers; handler->handler; handler++) { INIT_HLIST_NODE(&handler->list); rc = smc_wr_rx_register_handler(handler); if (rc) break; } return rc; }
linux-master
net/smc/smc_cdc.c
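smc_cdc_rx_handler() drops CDC messages whose sequence number is old using smc_cdc_before(), which relies on the signed interpretation of the 16-bit difference so the comparison stays correct across the 0xffff wrap. A small self-contained demonstration of that trick follows; seq_before() is a local stand-in, not the kernel helper.

/* Wrap-safe sequence comparison in the style of smc_cdc_before():
 * a sequence number counts as "before" another as long as the two are
 * less than 0x8000 apart, even across the 0xffff -> 0x0000 wrap.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint16_t seq1, uint16_t seq2)
{
	return (int16_t)(seq1 - seq2) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(10, 20));		/* 1: 10 is older */
	printf("%d\n", seq_before(0xfffe, 0x0003));	/* 1: older across the wrap */
	printf("%d\n", seq_before(0x0003, 0xfffe));	/* 0: newer across the wrap */
	return 0;
}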
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/lapb.h> /* * This routine purges all the queues of frames. */ void lapb_clear_queues(struct lapb_cb *lapb) { skb_queue_purge(&lapb->write_queue); skb_queue_purge(&lapb->ack_queue); } /* * This routine purges the input queue of those frames that have been * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the * SDL diagram. */ void lapb_frames_acked(struct lapb_cb *lapb, unsigned short nr) { struct sk_buff *skb; int modulus; modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS; /* * Remove all the ack-ed frames from the ack queue. */ if (lapb->va != nr) while (skb_peek(&lapb->ack_queue) && lapb->va != nr) { skb = skb_dequeue(&lapb->ack_queue); kfree_skb(skb); lapb->va = (lapb->va + 1) % modulus; } } void lapb_requeue_frames(struct lapb_cb *lapb) { struct sk_buff *skb, *skb_prev = NULL; /* * Requeue all the un-ack-ed frames on the output queue to be picked * up by lapb_kick called from the timer. This arrangement handles the * possibility of an empty output queue. */ while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) { if (!skb_prev) skb_queue_head(&lapb->write_queue, skb); else skb_append(skb_prev, skb, &lapb->write_queue); skb_prev = skb; } } /* * Validate that the value of nr is between va and vs. Return true or * false for testing. */ int lapb_validate_nr(struct lapb_cb *lapb, unsigned short nr) { unsigned short vc = lapb->va; int modulus; modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS; while (vc != lapb->vs) { if (nr == vc) return 1; vc = (vc + 1) % modulus; } return nr == lapb->vs; } /* * This routine is the centralised routine for parsing the control * information for the different frame formats. */ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { frame->type = LAPB_ILLEGAL; lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data); /* We always need to look at 2 bytes, sometimes we need * to look at 3 and those cases are handled below. 
*/ if (!pskb_may_pull(skb, 2)) return -1; if (lapb->mode & LAPB_MLP) { if (lapb->mode & LAPB_DCE) { if (skb->data[0] == LAPB_ADDR_D) frame->cr = LAPB_COMMAND; if (skb->data[0] == LAPB_ADDR_C) frame->cr = LAPB_RESPONSE; } else { if (skb->data[0] == LAPB_ADDR_C) frame->cr = LAPB_COMMAND; if (skb->data[0] == LAPB_ADDR_D) frame->cr = LAPB_RESPONSE; } } else { if (lapb->mode & LAPB_DCE) { if (skb->data[0] == LAPB_ADDR_B) frame->cr = LAPB_COMMAND; if (skb->data[0] == LAPB_ADDR_A) frame->cr = LAPB_RESPONSE; } else { if (skb->data[0] == LAPB_ADDR_A) frame->cr = LAPB_COMMAND; if (skb->data[0] == LAPB_ADDR_B) frame->cr = LAPB_RESPONSE; } } skb_pull(skb, 1); if (lapb->mode & LAPB_EXTENDED) { if (!(skb->data[0] & LAPB_S)) { if (!pskb_may_pull(skb, 2)) return -1; /* * I frame - carries NR/NS/PF */ frame->type = LAPB_I; frame->ns = (skb->data[0] >> 1) & 0x7F; frame->nr = (skb->data[1] >> 1) & 0x7F; frame->pf = skb->data[1] & LAPB_EPF; frame->control[0] = skb->data[0]; frame->control[1] = skb->data[1]; skb_pull(skb, 2); } else if ((skb->data[0] & LAPB_U) == 1) { if (!pskb_may_pull(skb, 2)) return -1; /* * S frame - take out PF/NR */ frame->type = skb->data[0] & 0x0F; frame->nr = (skb->data[1] >> 1) & 0x7F; frame->pf = skb->data[1] & LAPB_EPF; frame->control[0] = skb->data[0]; frame->control[1] = skb->data[1]; skb_pull(skb, 2); } else if ((skb->data[0] & LAPB_U) == 3) { /* * U frame - take out PF */ frame->type = skb->data[0] & ~LAPB_SPF; frame->pf = skb->data[0] & LAPB_SPF; frame->control[0] = skb->data[0]; frame->control[1] = 0x00; skb_pull(skb, 1); } } else { if (!(skb->data[0] & LAPB_S)) { /* * I frame - carries NR/NS/PF */ frame->type = LAPB_I; frame->ns = (skb->data[0] >> 1) & 0x07; frame->nr = (skb->data[0] >> 5) & 0x07; frame->pf = skb->data[0] & LAPB_SPF; } else if ((skb->data[0] & LAPB_U) == 1) { /* * S frame - take out PF/NR */ frame->type = skb->data[0] & 0x0F; frame->nr = (skb->data[0] >> 5) & 0x07; frame->pf = skb->data[0] & LAPB_SPF; } else if ((skb->data[0] & LAPB_U) == 3) { /* * U frame - take out PF */ frame->type = skb->data[0] & ~LAPB_SPF; frame->pf = skb->data[0] & LAPB_SPF; } frame->control[0] = skb->data[0]; skb_pull(skb, 1); } return 0; } /* * This routine is called when the HDLC layer internally generates a * command or response for the remote machine ( eg. RR, UA etc. ). * Only supervisory or unnumbered frames are processed, FRMRs are handled * by lapb_transmit_frmr below. */ void lapb_send_control(struct lapb_cb *lapb, int frametype, int poll_bit, int type) { struct sk_buff *skb; unsigned char *dptr; if ((skb = alloc_skb(LAPB_HEADER_LEN + 3, GFP_ATOMIC)) == NULL) return; skb_reserve(skb, LAPB_HEADER_LEN + 1); if (lapb->mode & LAPB_EXTENDED) { if ((frametype & LAPB_U) == LAPB_U) { dptr = skb_put(skb, 1); *dptr = frametype; *dptr |= poll_bit ? LAPB_SPF : 0; } else { dptr = skb_put(skb, 2); dptr[0] = frametype; dptr[1] = (lapb->vr << 1); dptr[1] |= poll_bit ? LAPB_EPF : 0; } } else { dptr = skb_put(skb, 1); *dptr = frametype; *dptr |= poll_bit ? LAPB_SPF : 0; if ((frametype & LAPB_U) == LAPB_S) /* S frames carry NR */ *dptr |= (lapb->vr << 5); } lapb_transmit_buffer(lapb, skb, type); } /* * This routine generates FRMRs based on information previously stored in * the LAPB control block. 
*/ void lapb_transmit_frmr(struct lapb_cb *lapb) { struct sk_buff *skb; unsigned char *dptr; if ((skb = alloc_skb(LAPB_HEADER_LEN + 7, GFP_ATOMIC)) == NULL) return; skb_reserve(skb, LAPB_HEADER_LEN + 1); if (lapb->mode & LAPB_EXTENDED) { dptr = skb_put(skb, 6); *dptr++ = LAPB_FRMR; *dptr++ = lapb->frmr_data.control[0]; *dptr++ = lapb->frmr_data.control[1]; *dptr++ = (lapb->vs << 1) & 0xFE; *dptr = (lapb->vr << 1) & 0xFE; if (lapb->frmr_data.cr == LAPB_RESPONSE) *dptr |= 0x01; dptr++; *dptr++ = lapb->frmr_type; lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n", lapb->dev, lapb->state, &skb->data[1]); } else { dptr = skb_put(skb, 4); *dptr++ = LAPB_FRMR; *dptr++ = lapb->frmr_data.control[0]; *dptr = (lapb->vs << 1) & 0x0E; *dptr |= (lapb->vr << 5) & 0xE0; if (lapb->frmr_data.cr == LAPB_RESPONSE) *dptr |= 0x10; dptr++; *dptr++ = lapb->frmr_type; lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n", lapb->dev, lapb->state, &skb->data[1]); } lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); }
linux-master
net/lapb/lapb_subr.c
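lapb_decode() classifies the control field by its low bits: bit 0 clear marks an I frame, a low bit pair of 01 a supervisory frame, and 11 an unnumbered frame, with N(S), N(R) and P/F taken from fixed positions in the basic (modulo 8) mode. The sketch below reproduces only that basic-mode classification in standalone C; struct frame and decode_basic() are invented for the example, and plain masks are written out instead of the LAPB_* macros.

/* Standalone decode of a basic (modulo 8) HDLC/LAPB control byte,
 * mirroring the non-extended branch of lapb_decode().
 */
#include <stdio.h>

struct frame {
	int is_i, is_s, is_u;
	int ns, nr, pf;
};

static void decode_basic(unsigned char ctrl, struct frame *f)
{
	f->is_i = f->is_s = f->is_u = 0;
	f->ns = f->nr = f->pf = 0;

	if (!(ctrl & 0x01)) {			/* I frame: N(S), N(R), P/F */
		f->is_i = 1;
		f->ns = (ctrl >> 1) & 0x07;
		f->nr = (ctrl >> 5) & 0x07;
		f->pf = (ctrl >> 4) & 0x01;
	} else if ((ctrl & 0x03) == 0x01) {	/* S frame: N(R), P/F */
		f->is_s = 1;
		f->nr = (ctrl >> 5) & 0x07;
		f->pf = (ctrl >> 4) & 0x01;
	} else {				/* U frame: P/F only */
		f->is_u = 1;
		f->pf = (ctrl >> 4) & 0x01;
	}
}

int main(void)
{
	struct frame f;

	decode_basic(0x2C, &f);	/* 0b00101100: I frame, N(S)=6, N(R)=1, P=0 */
	printf("I=%d N(S)=%d N(R)=%d P/F=%d\n", f.is_i, f.ns, f.nr, f.pf);
	return 0;
}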
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naulor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/lapb.h> /* * State machine for state 0, Disconnected State. * The handling of the timer(s) is in file lapb_timer.c. */ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { switch (frame->type) { case LAPB_SABM: lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S0 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } break; case LAPB_SABME: lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } else { lapb_dbg(1, "(%p) S0 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; default: break; } kfree_skb(skb); } /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file lapb_timer.c. 
*/ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { switch (frame->type) { case LAPB_SABM: lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { lapb_dbg(1, "(%p) S1 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } break; case LAPB_SABME: lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S1 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); } else { lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_connect_confirmation(lapb, LAPB_OK); } break; case LAPB_DM: lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_disconnect_indication(lapb, LAPB_REFUSED); } break; } kfree_skb(skb); } /* * State machine for state 2, Awaiting Release State. * The handling of the timer(s) is in file lapb_timer.c */ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { switch (frame->type) { case LAPB_SABM: case LAPB_SABME: lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n", lapb->dev, frame->pf); lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; case LAPB_DISC: lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); break; case LAPB_UA: lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf); if (frame->pf) { lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_disconnect_confirmation(lapb, LAPB_OK); } break; case LAPB_DM: lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) { lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_disconnect_confirmation(lapb, LAPB_NOTCONNECTED); } break; case LAPB_I: case LAPB_REJ: case LAPB_RNR: case LAPB_RR: lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n", lapb->dev, frame->pf); lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); if (frame->pf) lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); break; } kfree_skb(skb); } /* * State machine for state 3, Connected State. * The handling of the timer(s) is in file lapb_timer.c */ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { int queued = 0; int modulus = (lapb->mode & LAPB_EXTENDED) ? 
LAPB_EMODULUS : LAPB_SMODULUS; switch (frame->type) { case LAPB_SABM: lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S3 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { lapb_dbg(1, "(%p) S3 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_requeue_frames(lapb); } break; case LAPB_SABME: lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S3 TX UA(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_requeue_frames(lapb); } else { lapb_dbg(1, "(%p) S3 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; case LAPB_DISC: lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_OK); break; case LAPB_DM: lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_disconnect_indication(lapb, LAPB_NOTCONNECTED); break; case LAPB_RNR: lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n", lapb->dev, frame->pf, frame->nr); lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { lapb_check_iframes_acked(lapb, frame->nr); } else { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; lapb->n2count = 0; } break; case LAPB_RR: lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n", lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { lapb_check_iframes_acked(lapb, frame->nr); } else { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; lapb->n2count = 0; } break; case LAPB_REJ: lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n", lapb->dev, frame->pf, frame->nr); lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION; lapb_check_need_response(lapb, frame->cr, frame->pf); if (lapb_validate_nr(lapb, frame->nr)) { lapb_frames_acked(lapb, frame->nr); lapb_stop_t1timer(lapb); lapb->n2count = 0; lapb_requeue_frames(lapb); } else { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; lapb_transmit_frmr(lapb); lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; lapb->n2count = 0; } break; case LAPB_I: lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n", lapb->dev, frame->pf, frame->ns, frame->nr); if (!lapb_validate_nr(lapb, frame->nr)) { lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_Z; 
lapb_transmit_frmr(lapb); lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; lapb->n2count = 0; break; } if (lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) lapb_frames_acked(lapb, frame->nr); else lapb_check_iframes_acked(lapb, frame->nr); if (frame->ns == lapb->vr) { int cn; cn = lapb_data_indication(lapb, skb); queued = 1; /* * If upper layer has dropped the frame, we * basically ignore any further protocol * processing. This will cause the peer * to re-transmit the frame later like * a frame lost on the wire. */ if (cn == NET_RX_DROP) { pr_debug("rx congestion\n"); break; } lapb->vr = (lapb->vr + 1) % modulus; lapb->condition &= ~LAPB_REJECT_CONDITION; if (frame->pf) lapb_enquiry_response(lapb); else { if (!(lapb->condition & LAPB_ACK_PENDING_CONDITION)) { lapb->condition |= LAPB_ACK_PENDING_CONDITION; lapb_start_t2timer(lapb); } } } else { if (lapb->condition & LAPB_REJECT_CONDITION) { if (frame->pf) lapb_enquiry_response(lapb); } else { lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n", lapb->dev, frame->pf, lapb->vr); lapb->condition |= LAPB_REJECT_CONDITION; lapb_send_control(lapb, LAPB_REJ, frame->pf, LAPB_RESPONSE); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; } } break; case LAPB_FRMR: lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n", lapb->dev, frame->pf, skb->data); lapb_establish_data_link(lapb); lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); lapb_requeue_frames(lapb); lapb->state = LAPB_STATE_1; break; case LAPB_ILLEGAL: lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf); lapb->frmr_data = *frame; lapb->frmr_type = LAPB_FRMR_W; lapb_transmit_frmr(lapb); lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_4; lapb->n2count = 0; break; } if (!queued) kfree_skb(skb); } /* * State machine for state 4, Frame Reject State. * The handling of the timer(s) is in file lapb_timer.c. 
*/ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb, struct lapb_frame *frame) { switch (frame->type) { case LAPB_SABM: lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S4 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } else { lapb_dbg(1, "(%p) S4 TX UA(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } break; case LAPB_SABME: lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf); if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S4 TX UA(%d)\n", lapb->dev, frame->pf); lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev); lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_3; lapb->condition = 0x00; lapb->n2count = 0; lapb->vs = 0; lapb->vr = 0; lapb->va = 0; lapb_connect_indication(lapb, LAPB_OK); } else { lapb_dbg(1, "(%p) S4 TX DM(%d)\n", lapb->dev, frame->pf); lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); } break; } kfree_skb(skb); } /* * Process an incoming LAPB frame */ void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *skb) { struct lapb_frame frame; if (lapb_decode(lapb, skb, &frame) < 0) { kfree_skb(skb); return; } switch (lapb->state) { case LAPB_STATE_0: lapb_state0_machine(lapb, skb, &frame); break; case LAPB_STATE_1: lapb_state1_machine(lapb, skb, &frame); break; case LAPB_STATE_2: lapb_state2_machine(lapb, skb, &frame); break; case LAPB_STATE_3: lapb_state3_machine(lapb, skb, &frame); break; case LAPB_STATE_4: lapb_state4_machine(lapb, skb, &frame); break; } lapb_kick(lapb); }
linux-master
net/lapb/lapb_in.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/lapb.h> /* * This procedure is passed a buffer descriptor for an iframe. It builds * the rest of the control part of the frame and then writes it out. */ static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll_bit) { unsigned char *frame; if (!skb) return; if (lapb->mode & LAPB_EXTENDED) { frame = skb_push(skb, 2); frame[0] = LAPB_I; frame[0] |= lapb->vs << 1; frame[1] = poll_bit ? LAPB_EPF : 0; frame[1] |= lapb->vr << 1; } else { frame = skb_push(skb, 1); *frame = LAPB_I; *frame |= poll_bit ? LAPB_SPF : 0; *frame |= lapb->vr << 5; *frame |= lapb->vs << 1; } lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n", lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr); lapb_transmit_buffer(lapb, skb, LAPB_COMMAND); } void lapb_kick(struct lapb_cb *lapb) { struct sk_buff *skb, *skbn; unsigned short modulus, start, end; modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS; start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs; end = (lapb->va + lapb->window) % modulus; if (!(lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) && start != end && skb_peek(&lapb->write_queue)) { lapb->vs = start; /* * Dequeue the frame and copy it. */ skb = skb_dequeue(&lapb->write_queue); do { skbn = skb_copy(skb, GFP_ATOMIC); if (!skbn) { skb_queue_head(&lapb->write_queue, skb); break; } if (skb->sk) skb_set_owner_w(skbn, skb->sk); /* * Transmit the frame copy. */ lapb_send_iframe(lapb, skbn, LAPB_POLLOFF); lapb->vs = (lapb->vs + 1) % modulus; /* * Requeue the original data frame. 
*/ skb_queue_tail(&lapb->ack_queue, skb); } while (lapb->vs != end && (skb = skb_dequeue(&lapb->write_queue)) != NULL); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; if (!lapb_t1timer_running(lapb)) lapb_start_t1timer(lapb); } } void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) { unsigned char *ptr; ptr = skb_push(skb, 1); if (lapb->mode & LAPB_MLP) { if (lapb->mode & LAPB_DCE) { if (type == LAPB_COMMAND) *ptr = LAPB_ADDR_C; if (type == LAPB_RESPONSE) *ptr = LAPB_ADDR_D; } else { if (type == LAPB_COMMAND) *ptr = LAPB_ADDR_D; if (type == LAPB_RESPONSE) *ptr = LAPB_ADDR_C; } } else { if (lapb->mode & LAPB_DCE) { if (type == LAPB_COMMAND) *ptr = LAPB_ADDR_A; if (type == LAPB_RESPONSE) *ptr = LAPB_ADDR_B; } else { if (type == LAPB_COMMAND) *ptr = LAPB_ADDR_B; if (type == LAPB_RESPONSE) *ptr = LAPB_ADDR_A; } } lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data); if (!lapb_data_transmit(lapb, skb)) kfree_skb(skb); } void lapb_establish_data_link(struct lapb_cb *lapb) { lapb->condition = 0x00; lapb->n2count = 0; if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); } void lapb_enquiry_response(struct lapb_cb *lapb) { lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n", lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; } void lapb_timeout_response(struct lapb_cb *lapb) { lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n", lapb->dev, lapb->state, lapb->vr); lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE); lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; } void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short nr) { if (lapb->vs == nr) { lapb_frames_acked(lapb, nr); lapb_stop_t1timer(lapb); lapb->n2count = 0; } else if (lapb->va != nr) { lapb_frames_acked(lapb, nr); lapb_start_t1timer(lapb); } } void lapb_check_need_response(struct lapb_cb *lapb, int type, int pf) { if (type == LAPB_COMMAND && pf) lapb_enquiry_response(lapb); }
linux-master
net/lapb/lapb_out.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include <net/lapb.h> static LIST_HEAD(lapb_list); static DEFINE_RWLOCK(lapb_list_lock); /* * Free an allocated lapb control block. */ static void lapb_free_cb(struct lapb_cb *lapb) { kfree(lapb); } static __inline__ void lapb_hold(struct lapb_cb *lapb) { refcount_inc(&lapb->refcnt); } static __inline__ void lapb_put(struct lapb_cb *lapb) { if (refcount_dec_and_test(&lapb->refcnt)) lapb_free_cb(lapb); } /* * Socket removal during an interrupt is now safe. */ static void __lapb_remove_cb(struct lapb_cb *lapb) { if (lapb->node.next) { list_del(&lapb->node); lapb_put(lapb); } } /* * Add a socket to the bound sockets list. */ static void __lapb_insert_cb(struct lapb_cb *lapb) { list_add(&lapb->node, &lapb_list); lapb_hold(lapb); } static struct lapb_cb *__lapb_devtostruct(struct net_device *dev) { struct lapb_cb *lapb, *use = NULL; list_for_each_entry(lapb, &lapb_list, node) { if (lapb->dev == dev) { use = lapb; break; } } if (use) lapb_hold(use); return use; } static struct lapb_cb *lapb_devtostruct(struct net_device *dev) { struct lapb_cb *rc; read_lock_bh(&lapb_list_lock); rc = __lapb_devtostruct(dev); read_unlock_bh(&lapb_list_lock); return rc; } /* * Create an empty LAPB control block. 
*/ static struct lapb_cb *lapb_create_cb(void) { struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); if (!lapb) goto out; skb_queue_head_init(&lapb->write_queue); skb_queue_head_init(&lapb->ack_queue); timer_setup(&lapb->t1timer, NULL, 0); timer_setup(&lapb->t2timer, NULL, 0); lapb->t1timer_running = false; lapb->t2timer_running = false; lapb->t1 = LAPB_DEFAULT_T1; lapb->t2 = LAPB_DEFAULT_T2; lapb->n2 = LAPB_DEFAULT_N2; lapb->mode = LAPB_DEFAULT_MODE; lapb->window = LAPB_DEFAULT_WINDOW; lapb->state = LAPB_STATE_0; spin_lock_init(&lapb->lock); refcount_set(&lapb->refcnt, 1); out: return lapb; } int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (lapb) { lapb_put(lapb); goto out; } lapb = lapb_create_cb(); rc = LAPB_NOMEM; if (!lapb) goto out; lapb->dev = dev; lapb->callbacks = callbacks; __lapb_insert_cb(lapb); lapb_start_t1timer(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_register); int lapb_unregister(struct net_device *dev) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (!lapb) goto out; lapb_put(lapb); /* Wait for other refs to "lapb" to drop */ while (refcount_read(&lapb->refcnt) > 2) usleep_range(1, 10); spin_lock_bh(&lapb->lock); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_clear_queues(lapb); spin_unlock_bh(&lapb->lock); /* Wait for running timers to stop */ del_timer_sync(&lapb->t1timer); del_timer_sync(&lapb->t2timer); __lapb_remove_cb(lapb); lapb_put(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_unregister); int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); parms->t1 = lapb->t1 / HZ; parms->t2 = lapb->t2 / HZ; parms->n2 = lapb->n2; parms->n2count = lapb->n2count; parms->state = lapb->state; parms->window = lapb->window; parms->mode = lapb->mode; if (!timer_pending(&lapb->t1timer)) parms->t1timer = 0; else parms->t1timer = (lapb->t1timer.expires - jiffies) / HZ; if (!timer_pending(&lapb->t2timer)) parms->t2timer = 0; else parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ; spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; out: return rc; } EXPORT_SYMBOL(lapb_getparms); int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_INVALUE; if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1) goto out_put; if (lapb->state == LAPB_STATE_0) { if (parms->mode & LAPB_EXTENDED) { if (parms->window < 1 || parms->window > 127) goto out_put; } else { if (parms->window < 1 || parms->window > 7) goto out_put; } lapb->mode = parms->mode; lapb->window = parms->window; } lapb->t1 = parms->t1 * HZ; lapb->t2 = parms->t2 * HZ; lapb->n2 = parms->n2; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_setparms); int lapb_connect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_OK; if (lapb->state == LAPB_STATE_1) goto out_put; rc = LAPB_CONNECTED; if (lapb->state == LAPB_STATE_3 || lapb->state == LAPB_STATE_4) goto 
out_put; lapb_establish_data_link(lapb); lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev); lapb->state = LAPB_STATE_1; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_connect_request); static int __lapb_disconnect_request(struct lapb_cb *lapb) { switch (lapb->state) { case LAPB_STATE_0: return LAPB_NOTCONNECTED; case LAPB_STATE_1: lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); return LAPB_NOTCONNECTED; case LAPB_STATE_2: return LAPB_OK; } lapb_clear_queues(lapb); lapb->n2count = 0; lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_2; lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev); return LAPB_OK; } int lapb_disconnect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = __lapb_disconnect_request(lapb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_disconnect_request); int lapb_data_request(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_NOTCONNECTED; if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4) goto out_put; skb_queue_tail(&lapb->write_queue, skb); lapb_kick(lapb); rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_data_request); int lapb_data_received(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (lapb) { spin_lock_bh(&lapb->lock); lapb_data_input(lapb, skb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; } return rc; } EXPORT_SYMBOL(lapb_data_received); void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_confirmation) lapb->callbacks->connect_confirmation(lapb->dev, reason); } void lapb_connect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_indication) lapb->callbacks->connect_indication(lapb->dev, reason); } void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_confirmation) lapb->callbacks->disconnect_confirmation(lapb->dev, reason); } void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_indication) lapb->callbacks->disconnect_indication(lapb->dev, reason); } int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) { if (lapb->callbacks->data_indication) return lapb->callbacks->data_indication(lapb->dev, skb); kfree_skb(skb); return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ } int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) { int used = 0; if (lapb->callbacks->data_transmit) { lapb->callbacks->data_transmit(lapb->dev, skb); used = 1; } return used; } /* Handle device status changes. 
*/ static int lapb_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct lapb_cb *lapb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type != ARPHRD_X25) return NOTIFY_DONE; lapb = lapb_devtostruct(dev); if (!lapb) return NOTIFY_DONE; spin_lock_bh(&lapb->lock); switch (event) { case NETDEV_UP: lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name); if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier is already up: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } break; case NETDEV_GOING_DOWN: if (netif_carrier_ok(dev)) __lapb_disconnect_request(lapb); break; case NETDEV_DOWN: lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); break; case NETDEV_CHANGE: if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier detected: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } else { lapb_dbg(0, "(%p) Carrier lost: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); } break; } spin_unlock_bh(&lapb->lock); lapb_put(lapb); return NOTIFY_DONE; } static struct notifier_block lapb_dev_notifier = { .notifier_call = lapb_device_event, }; static int __init lapb_init(void) { return register_netdevice_notifier(&lapb_dev_notifier); } static void __exit lapb_exit(void) { WARN_ON(!list_empty(&lapb_list)); unregister_netdevice_notifier(&lapb_dev_notifier); } MODULE_AUTHOR("Jonathan Naylor <[email protected]>"); MODULE_DESCRIPTION("The X.25 Link Access Procedure B link layer protocol"); MODULE_LICENSE("GPL"); module_init(lapb_init); module_exit(lapb_exit);
linux-master
net/lapb/lapb_iface.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/lapb.h> static void lapb_t1timer_expiry(struct timer_list *); static void lapb_t2timer_expiry(struct timer_list *); void lapb_start_t1timer(struct lapb_cb *lapb) { del_timer(&lapb->t1timer); lapb->t1timer.function = lapb_t1timer_expiry; lapb->t1timer.expires = jiffies + lapb->t1; lapb->t1timer_running = true; add_timer(&lapb->t1timer); } void lapb_start_t2timer(struct lapb_cb *lapb) { del_timer(&lapb->t2timer); lapb->t2timer.function = lapb_t2timer_expiry; lapb->t2timer.expires = jiffies + lapb->t2; lapb->t2timer_running = true; add_timer(&lapb->t2timer); } void lapb_stop_t1timer(struct lapb_cb *lapb) { lapb->t1timer_running = false; del_timer(&lapb->t1timer); } void lapb_stop_t2timer(struct lapb_cb *lapb) { lapb->t2timer_running = false; del_timer(&lapb->t2timer); } int lapb_t1timer_running(struct lapb_cb *lapb) { return lapb->t1timer_running; } static void lapb_t2timer_expiry(struct timer_list *t) { struct lapb_cb *lapb = from_timer(lapb, t, t2timer); spin_lock_bh(&lapb->lock); if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */ goto out; if (!lapb->t2timer_running) /* The timer has been stopped */ goto out; if (lapb->condition & LAPB_ACK_PENDING_CONDITION) { lapb->condition &= ~LAPB_ACK_PENDING_CONDITION; lapb_timeout_response(lapb); } lapb->t2timer_running = false; out: spin_unlock_bh(&lapb->lock); } static void lapb_t1timer_expiry(struct timer_list *t) { struct lapb_cb *lapb = from_timer(lapb, t, t1timer); spin_lock_bh(&lapb->lock); if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */ goto out; if (!lapb->t1timer_running) /* The timer has been stopped */ goto out; switch (lapb->state) { /* * If we are a DCE, send DM up to N2 times, then switch to * STATE_1 and send SABM(E). */ case LAPB_STATE_0: if (lapb->mode & LAPB_DCE && lapb->n2count != lapb->n2) { lapb->n2count++; lapb_send_control(lapb, LAPB_DM, LAPB_POLLOFF, LAPB_RESPONSE); } else { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } break; /* * Awaiting connection state, send SABM(E), up to N2 times. */ case LAPB_STATE_1: if (lapb->n2count == lapb->n2) { lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb->t1timer_running = false; goto out; } else { lapb->n2count++; if (lapb->mode & LAPB_EXTENDED) { lapb_dbg(1, "(%p) S1 TX SABME(1)\n", lapb->dev); lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND); } else { lapb_dbg(1, "(%p) S1 TX SABM(1)\n", lapb->dev); lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND); } } break; /* * Awaiting disconnection state, send DISC, up to N2 times. 
*/ case LAPB_STATE_2: if (lapb->n2count == lapb->n2) { lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT); lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev); lapb->t1timer_running = false; goto out; } else { lapb->n2count++; lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); } break; /* * Data transfer state, restransmit I frames, up to N2 times. */ case LAPB_STATE_3: if (lapb->n2count == lapb->n2) { lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_stop_t2timer(lapb); lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev); lapb->t1timer_running = false; goto out; } else { lapb->n2count++; lapb_requeue_frames(lapb); lapb_kick(lapb); } break; /* * Frame reject state, restransmit FRMR frames, up to N2 times. */ case LAPB_STATE_4: if (lapb->n2count == lapb->n2) { lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb_disconnect_indication(lapb, LAPB_TIMEDOUT); lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev); lapb->t1timer_running = false; goto out; } else { lapb->n2count++; lapb_transmit_frmr(lapb); } break; } lapb_start_t1timer(lapb); out: spin_unlock_bh(&lapb->lock); }
linux-master
net/lapb/lapb_timer.c