// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw/api/scan.h"
#include "fw/api/rfi.h"
#include "time-event.h"
#include "fw-api.h"
#include "fw/acpi.h"
#include "fw/uefi.h"
#include "time-sync.h"

#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IWLWIFI);

static const struct iwl_op_mode_ops iwl_mvm_ops;
static const struct iwl_op_mode_ops iwl_mvm_ops_mq;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
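
/*
 * Both parameters can be set at module load time, e.g.:
 *	modprobe iwlmvm power_scheme=3
 * and read back via /sys/module/iwlmvm/parameters/; mode 0444 makes
 * them read-only at runtime.
 */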

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret)
		pr_err("Unable to register MVM op_mode: %d\n", ret);

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);
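
/*
 * Propagate the radio configuration (type/step/dash parsed from the FW
 * PHY SKU) and related control bits into CSR_HW_IF_CONFIG_REG; the
 * register is only programmed on device families before AX210.
 */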
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val;
	u32 phy_config = iwl_mvm_get_phy_config(mvm);

	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
			 FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
			 FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
			 FW_PHY_CFG_RADIO_DASH_POS;

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return;

	/* SKU control */
	reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/*
	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
	 * sampling, and shouldn't be set to any non-zero value.
	 * The same is supposed to be true of the other HW, but unsetting
	 * them (such as the 7260) causes automatic tests to fail on seemingly
	 * unrelated errors. Need to further investigate this, but for now
	 * we'll separate cases.
	 */
	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;

	if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
		reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
				CSR_HW_IF_CONFIG_REG_D3_DEBUG,
				reg_val);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (!mvm->trans->cfg->apmg_not_supported)
		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
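
/*
 * The firmware uses this notification to recommend entering or leaving
 * EMLSR (eSR); an "enter" recommendation only unblocks eSR, and the
 * notification is ignored while eSR is already active.
 */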
static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);

	/* the FW recommendation applies only to entering EMLSR */
	if (!vif || iwl_mvm_vif_from_mac80211(vif)->esr_active)
		return;

	if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
		iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
	else
		iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
				  iwl_mvm_get_primary_link(vif));
}
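
/*
 * Extended-CCA workaround: if the firmware reports persistent CCA
 * trouble on an associated 2.4 GHz connection at 40 MHz or wider, stop
 * advertising HT/HE 40 MHz support on that band and force a disconnect
 * so the next association uses a narrower channel.
 */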
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
				     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
	struct ieee80211_supported_band *sband;
	const struct ieee80211_sta_he_cap *he_cap;
	struct ieee80211_vif *vif;

	if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
		return;

	vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->bss_conf.chanreq.oper.chan ||
	    vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
	    vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
		return;

	if (!vif->cfg.assoc)
		return;

	/* this shouldn't happen *again*, ignore it */
	if (mvm->cca_40mhz_workaround)
		return;

	/*
	 * We'll decrement this on disconnect - so set to 2 since we'll
	 * still have to disconnect from the current AP first.
	 */
	mvm->cca_40mhz_workaround = 2;

	/*
	 * This capability manipulation isn't really ideal, but it's the
	 * easiest choice - otherwise we'd have to do some major changes
	 * in mac80211 to support this, which isn't worth it. This does
	 * mean that userspace may have outdated information, but that's
	 * actually not an issue at all.
	 */
	sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];

	WARN_ON(!sband->ht_cap.ht_supported);
	WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
	sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

	he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);

	if (he_cap) {
		/* we know that ours is writable */
		struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;

		WARN_ON(!he->has_he);
		WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
		he->he_cap_elem.phy_cap_info[0] &=
			~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
	}

	ieee80211_disconnect(vif, true);
}
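
/*
 * Re-evaluate the firmware's SMPS request for one link: static SMPS is
 * forced when the firmware asked for it on a 160 MHz HE link, otherwise
 * the decision is left automatic.
 */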
void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *link_conf)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;

	if (!link_conf)
		return;

	if (mvm->fw_static_smps_request &&
	    link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
	    link_conf->he_support)
		mode = IEEE80211_SMPS_STATIC;

	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode,
			    link_conf->link_id);
}

static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	rcu_read_lock();

	for_each_vif_active_link(vif, link_conf, link_id)
		iwl_mvm_update_link_smps(vif, link_conf);

	rcu_read_unlock();
}

static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;

	/*
	 * We could pass it to the iterator data, but also need to remember
	 * it for new interfaces that are added while in this state.
	 */
	mvm->fw_static_smps_request =
		req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
	ieee80211_iterate_interfaces(mvm->hw,
				     IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
				     iwl_mvm_intf_dual_chain_req, NULL);
}

/**
 * enum iwl_rx_handler_context: context for Rx handler
 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
 *	which can't acquire mvm->mutex.
 * @RX_HANDLER_ASYNC_LOCKED: If the handler needs to hold mvm->mutex
 *	(and only in this case!), it should be set as ASYNC. In that case,
 *	it will be called from a worker with mvm->mutex held.
 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
 *	mutex itself, it will be called from a worker without mvm->mutex held.
 * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
 *	and mvm->mutex. Will be handled with the wiphy_work queue infra
 *	instead of regular work queue.
 */
enum iwl_rx_handler_context {
	RX_HANDLER_SYNC,
	RX_HANDLER_ASYNC_LOCKED,
	RX_HANDLER_ASYNC_UNLOCKED,
	RX_HANDLER_ASYNC_LOCKED_WIPHY,
};

/**
 * struct iwl_rx_handlers: handler for FW notification
 * @cmd_id: command id
 * @min_size: minimum size to expect for the notification
 * @context: see &iwl_rx_handler_context
 * @fn: the function is called when notification is received
 */
struct iwl_rx_handlers {
	u16 cmd_id, min_size;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context)		\
	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
#define RX_HANDLER(_cmd_id, _fn, _context, _struct)		\
	{ .cmd_id = _cmd_id, .fn = _fn,				\
	  .context = _context, .min_size = sizeof(_struct), }
#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct)	\
	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn,		\
	  .context = _context, .min_size = sizeof(_struct), }
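
/*
 * For example, the first entry in the table below,
 *	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
 *		   struct iwl_mvm_tx_resp),
 * routes TX_CMD notifications to iwl_mvm_rx_tx_cmd() in the Rx path
 * (no mvm->mutex), with sizeof(struct iwl_mvm_tx_resp) as the minimum
 * expected notification size.
 */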

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can run in one of the contexts described in
 * &iwl_rx_handler_context.
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
		   struct iwl_mvm_tx_resp),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
		   struct iwl_mvm_ba_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
		       struct iwl_tlc_update_notif),

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED_WIPHY,
		   struct iwl_bt_coex_profile_notif),
	RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
			   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
			   RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
		       iwl_mvm_handle_rx_system_oper_stats,
		       RX_HANDLER_ASYNC_LOCKED_WIPHY,
		       struct iwl_system_statistics_notif_oper),
	RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
		       iwl_mvm_handle_rx_system_oper_part1_stats,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_system_statistics_part1_notif_oper),
	RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF,
		       iwl_mvm_handle_rx_system_end_stats_notif,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_system_statistics_end_notif),

	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
		   struct iwl_ba_window_status_notif),

	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
		   RX_HANDLER_SYNC, struct iwl_time_event_notif),
	RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
		       iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
		       struct iwl_mvm_session_prot_notif),
	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),

	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
		   struct iwl_mvm_eosp_notification),

	RX_HANDLER(SCAN_ITERATION_COMPLETE,
		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
		   struct iwl_lmac_scan_complete_notif),
	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
		   iwl_mvm_rx_lmac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
	RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
			   iwl_mvm_rx_scan_match_found,
			   RX_HANDLER_SYNC),
	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
		   RX_HANDLER_ASYNC_LOCKED,
		   struct iwl_umac_scan_complete),
	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
		   struct iwl_umac_scan_iter_complete_notif),

	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
		   RX_HANDLER_ASYNC_LOCKED_WIPHY,
		   struct iwl_missed_beacons_notif),

	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
		   struct iwl_error_resp),
	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
		   struct iwl_uapsd_misbehaving_ap_notif),
	RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
			   RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
			       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
		       struct ct_kill_notif),

	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
		   RX_HANDLER_ASYNC_LOCKED,
		   struct iwl_tdls_channel_switch_notif),
	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
		   RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
	RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
		       iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_ftm_responder_stats),

	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
			       iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
	RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
			       iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),

	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
		       struct iwl_mfu_assert_dump_notif),
	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
		       struct iwl_stored_beacon_notif_v2),
	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
		       struct iwl_mu_group_mgmt_notif),
	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
		       struct iwl_mvm_pm_state_notification),
	RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
		       iwl_mvm_probe_resp_data_notif,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_probe_resp_data_notif),
	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
		       iwl_mvm_channel_switch_start_notif,
		       RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
	RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
		       iwl_mvm_channel_switch_error_notif,
		       RX_HANDLER_ASYNC_UNLOCKED,
		       struct iwl_channel_switch_error_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
		       iwl_mvm_rx_esr_mode_notif,
		       RX_HANDLER_ASYNC_LOCKED_WIPHY,
		       struct iwl_mvm_esr_mode_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
		       iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_datapath_monitor_notif),

	RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
		       iwl_mvm_rx_thermal_dual_chain_req,
		       RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_thermal_dual_chain_request),

	RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
		       iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
		       struct iwl_rfi_deactivate_notif),

	RX_HANDLER_GRP(LEGACY_GROUP,
		       WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
		       iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC,
		       struct iwl_time_msmt_notify),
	RX_HANDLER_GRP(LEGACY_GROUP,
		       WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
		       iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
		       struct iwl_time_msmt_cfm_notify),
	RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
		       iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
		       struct iwl_roc_notif),
	RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
		       iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
		       struct iwl_umac_scan_channel_survey_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
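
/*
 * HCMD_NAME() (provided by the iwlwifi core) associates a command's
 * numeric ID with its stringified name, so the arrays below let command
 * IDs be printed symbolically in debug output.
 */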

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
	HCMD_NAME(UCODE_ALIVE_NTFY),
	HCMD_NAME(REPLY_ERROR),
	HCMD_NAME(ECHO_CMD),
	HCMD_NAME(INIT_COMPLETE_NOTIF),
	HCMD_NAME(PHY_CONTEXT_CMD),
	HCMD_NAME(DBG_CFG),
	HCMD_NAME(SCAN_CFG_CMD),
	HCMD_NAME(SCAN_REQ_UMAC),
	HCMD_NAME(SCAN_ABORT_UMAC),
	HCMD_NAME(SCAN_COMPLETE_UMAC),
	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
	HCMD_NAME(ADD_STA_KEY),
	HCMD_NAME(ADD_STA),
	HCMD_NAME(REMOVE_STA),
	HCMD_NAME(TX_CMD),
	HCMD_NAME(SCD_QUEUE_CFG),
	HCMD_NAME(TXPATH_FLUSH),
	HCMD_NAME(MGMT_MCAST_KEY),
	HCMD_NAME(WEP_KEY),
	HCMD_NAME(SHARED_MEM_CFG),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
	HCMD_NAME(MAC_CONTEXT_CMD),
	HCMD_NAME(TIME_EVENT_CMD),
	HCMD_NAME(TIME_EVENT_NOTIFICATION),
	HCMD_NAME(BINDING_CONTEXT_CMD),
	HCMD_NAME(TIME_QUOTA_CMD),
	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
	HCMD_NAME(LEDS_CMD),
	HCMD_NAME(LQ_CMD),
	HCMD_NAME(FW_PAGING_BLOCK_CMD),
	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
	HCMD_NAME(HOT_SPOT_CMD),
	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
	HCMD_NAME(BT_COEX_CI),
	HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
	HCMD_NAME(PHY_CONFIGURATION_CMD),
	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
	HCMD_NAME(PHY_DB_CMD),
	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	HCMD_NAME(POWER_TABLE_CMD),
	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
	HCMD_NAME(NVM_ACCESS_CMD),
	HCMD_NAME(BEACON_NOTIFICATION),
	HCMD_NAME(BEACON_TEMPLATE_CMD),
	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
	HCMD_NAME(BT_CONFIG),
	HCMD_NAME(STATISTICS_CMD),
	HCMD_NAME(STATISTICS_NOTIFICATION),
	HCMD_NAME(EOSP_NOTIFICATION),
	HCMD_NAME(REDUCE_TX_POWER_CMD),
	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
	HCMD_NAME(TDLS_CONFIG_CMD),
	HCMD_NAME(MAC_PM_POWER_TABLE),
	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
	HCMD_NAME(RSS_CONFIG_CMD),
	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
	HCMD_NAME(REPLY_RX_PHY_CMD),
	HCMD_NAME(REPLY_RX_MPDU_CMD),
	HCMD_NAME(BAR_FRAME_RELEASE),
	HCMD_NAME(FRAME_RELEASE),
	HCMD_NAME(BA_NOTIF),
	HCMD_NAME(MCC_UPDATE_CMD),
	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
	HCMD_NAME(MARKER_CMD),
	HCMD_NAME(BT_PROFILE_NOTIFICATION),
	HCMD_NAME(MCAST_FILTER_CMD),
	HCMD_NAME(REPLY_SF_CFG_CMD),
	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
	HCMD_NAME(D3_CONFIG_CMD),
	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
	HCMD_NAME(WOWLAN_PATTERNS),
	HCMD_NAME(WOWLAN_CONFIGURATION),
	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
	HCMD_NAME(WOWLAN_TKIP_PARAM),
	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
	HCMD_NAME(WOWLAN_GET_STATUSES),
	HCMD_NAME(SCAN_ITERATION_COMPLETE),
	HCMD_NAME(D0I3_END_CMD),
	HCMD_NAME(LTR_CONFIG),
	HCMD_NAME(LDBG_CONFIG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
	HCMD_NAME(SHARED_MEM_CFG_CMD),
	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
	HCMD_NAME(FW_ERROR_RECOVERY_CMD),
	HCMD_NAME(RFI_CONFIG_CMD),
	HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
	HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
	HCMD_NAME(SYSTEM_STATISTICS_CMD),
	HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
	HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
	HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
	HCMD_NAME(SESSION_PROTECTION_CMD),
	HCMD_NAME(MAC_CONFIG_CMD),
	HCMD_NAME(LINK_CONFIG_CMD),
	HCMD_NAME(STA_CONFIG_CMD),
	HCMD_NAME(AUX_STA_CMD),
	HCMD_NAME(STA_REMOVE_CMD),
	HCMD_NAME(STA_DISABLE_TX_CMD),
	HCMD_NAME(ROC_CMD),
	HCMD_NAME(ROC_NOTIF),
	HCMD_NAME(SESSION_PROTECTION_NOTIF),
	HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
	HCMD_NAME(CTDP_CONFIG_CMD),
	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
	HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
	HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD),
	HCMD_NAME(CT_KILL_NOTIFICATION),
	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
	HCMD_NAME(DQA_ENABLE_CMD),
	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
	HCMD_NAME(STA_HE_CTXT_CMD),
	HCMD_NAME(RLC_CONFIG_CMD),
	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
	HCMD_NAME(TLC_MNG_CONFIG_CMD),
	HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
	HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
	HCMD_NAME(SEC_KEY_CMD),
	HCMD_NAME(ESR_MODE_NOTIF),
	HCMD_NAME(MONITOR_NOTIF),
	HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
	HCMD_NAME(STA_PM_NOTIF),
	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
	HCMD_NAME(RX_QUEUES_NOTIFICATION),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
	HCMD_NAME(STATISTICS_OPER_NOTIF),
	HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
	HCMD_NAME(CHANNEL_SURVEY_NOTIF),
	HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
	HCMD_NAME(TOF_RANGE_REQ_CMD),
	HCMD_NAME(TOF_CONFIG_CMD),
	HCMD_NAME(TOF_RANGE_ABORT_CMD),
	HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
	HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
	HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
	HCMD_NAME(TOF_LC_NOTIF),
	HCMD_NAME(TOF_RESPONDER_STATS),
	HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
	HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
	HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
	HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
	HCMD_NAME(D3_END_NOTIFICATION),
	HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
	HCMD_NAME(NVM_ACCESS_COMPLETE),
	HCMD_NAME(NVM_GET_INFO),
	HCMD_NAME(TAS_CONFIG),
};

static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
	[SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
	[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
	[REGULATORY_AND_NVM_GROUP] =
		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
	[STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};

/* these forward declarations avoid having to export the functions */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
					    struct wiphy_work *work);
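
/*
 * Look up the Tx power backoff that corresponds to the platform power
 * limit reported by the BIOS; returns 0 when the device has no backoff
 * table or no entry matches.
 */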
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
	u64 dflt_pwr_limit;

	if (!backoff)
		return 0;

	iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;

		backoff++;
	}

	return 0;
}

static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
{
	struct iwl_mvm *mvm =
		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
	struct ieee80211_vif *tx_blocked_vif;
	struct iwl_mvm_vif *mvmvif;

	mutex_lock(&mvm->mutex);

	tx_blocked_vif =
		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					  lockdep_is_held(&mvm->mutex));

	if (!tx_blocked_vif)
		goto unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
unlock:
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_fwrt_dump_start(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_lock(&mvm->mutex);
}

static void iwl_mvm_fwrt_dump_end(void *ctx)
{
	struct iwl_mvm *mvm = ctx;

	mutex_unlock(&mvm->mutex);
}

static bool iwl_mvm_fwrt_fw_running(void *ctx)
{
	return iwl_mvm_firmware_running(ctx);
}

static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd(mvm, host_cmd);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static bool iwl_mvm_d3_debug_enable(void *ctx)
{
	return IWL_MVM_D3_DEBUG;
}
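
/*
 * Callbacks through which the shared firmware runtime (fwrt) code takes
 * mvm->mutex around debug dump collection, queries firmware state and
 * sends host commands on the op mode's behalf.
 */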
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
	.dump_start = iwl_mvm_fwrt_dump_start,
	.dump_end = iwl_mvm_fwrt_dump_end,
	.fw_running = iwl_mvm_fwrt_fw_running,
	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
	.d3_debug_enable = iwl_mvm_d3_debug_enable,
};

static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	int ret;

	if (trans->csme_own) {
		if (WARN(!mvm->mei_registered,
			 "csme is owner, but we aren't registered to iwlmei\n"))
			goto get_nvm_from_fw;

		mvm->mei_nvm_data = iwl_mei_get_nvm();
		if (mvm->mei_nvm_data) {
			/*
			 * mvm->mei_nvm_data is set and because of that,
			 * we'll load the NVM from the FW when we'll get
			 * ownership.
			 */
			mvm->nvm_data =
				iwl_parse_mei_nvm_data(trans, trans->cfg,
						       mvm->mei_nvm_data,
						       mvm->fw,
						       mvm->set_tx_ant,
						       mvm->set_rx_ant);
			return 0;
		}

		IWL_ERR(mvm,
			"Got a NULL NVM from CSME, trying to get it from the device\n");
	}

get_nvm_from_fw:
	rtnl_lock();
	wiphy_lock(mvm->hw->wiphy);
	mutex_lock(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret) {
		mutex_unlock(&mvm->mutex);
		wiphy_unlock(mvm->hw->wiphy);
		rtnl_unlock();
		return ret;
	}

	ret = iwl_run_init_mvm_ucode(mvm);
	if (ret && ret != -ERFKILL)
		iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
	if (!ret && iwl_mvm_is_lar_supported(mvm)) {
		mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
		ret = iwl_mvm_init_mcc(mvm);
	}

	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);

	mutex_unlock(&mvm->mutex);
	wiphy_unlock(mvm->hw->wiphy);
	rtnl_unlock();

	if (ret)
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

	/* no longer need this regardless of failure or not */
	mvm->pldr_sync = false;

	return ret;
}

static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
{
	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
	int ret;

	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

	ret = iwl_mvm_mac_setup_register(mvm);
	if (ret)
		return ret;

	mvm->hw_registered = true;

	iwl_mvm_dbgfs_register(mvm);

	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
					 mvm->mei_rfkill_blocked,
					 RFKILL_HARD_BLOCK_NOT_OWNER);

	iwl_mvm_mei_set_sw_rfkill_state(mvm);

	return 0;
}
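
/*
 * The "frob" helpers below scrub sensitive material out of firmware
 * debug dumps before they leave the driver: Tx FIFO contents that may
 * contain key material, key-carrying host commands, and memory ranges
 * the firmware image marked for exclusion.
 */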
struct iwl_mvm_frob_txf_data {
	u8 *buf;
	size_t buflen;
};

static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct ieee80211_key_conf *key,
				      void *data)
{
	struct iwl_mvm_frob_txf_data *txf = data;
	u8 keylen, match, matchend;
	u8 *keydata;
	size_t i;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		keydata = key->key;
		keylen = key->keylen;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
		/*
		 * WEP has short keys which might show up in the payload,
		 * and then you can deduce the key, so in this case just
		 * remove all FIFO data.
		 * For TKIP, we don't know the phase 2 keys here, so same.
		 */
		memset(txf->buf, 0xBB, txf->buflen);
		return;
	default:
		return;
	}

	/* scan for key material and clear it out */
	match = 0;
	for (i = 0; i < txf->buflen; i++) {
		if (txf->buf[i] != keydata[match]) {
			match = 0;
			continue;
		}
		match++;
		if (match == keylen) {
			memset(txf->buf + i - keylen, 0xAA, keylen);
			match = 0;
		}
	}

	/* we're dealing with a FIFO, so check wrapped around data */
	matchend = match;
	for (i = 0; match && i < keylen - match; i++) {
		if (txf->buf[i] != keydata[match])
			break;
		match++;
		if (match == keylen) {
			memset(txf->buf, 0xAA, i + 1);
			memset(txf->buf + txf->buflen - matchend, 0xAA,
			       matchend);
			break;
		}
	}
}

static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
{
	struct iwl_mvm_frob_txf_data txf = {
		.buf = buf,
		.buflen = buflen,
	};
	struct iwl_mvm *mvm = ctx;

	/* embedded key material exists only on old API */
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	rcu_read_lock();
	ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
	rcu_read_unlock();
}
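
/*
 * Blank out key material in host commands captured for a debug dump;
 * all the sensitive commands here use wide headers in LONG_GROUP.
 */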
static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
{
	/* we only use wide headers for commands */
	struct iwl_cmd_header_wide *hdr = hcmd;
	unsigned int frob_start = sizeof(*hdr), frob_end = 0;

	if (len < sizeof(*hdr))
		return;

	/* all the commands we care about are in LONG_GROUP */
	if (hdr->group_id != LONG_GROUP)
		return;

	switch (hdr->cmd) {
	case WEP_KEY:
	case WOWLAN_TKIP_PARAM:
	case WOWLAN_KEK_KCK_MATERIAL:
	case ADD_STA_KEY:
		/*
		 * blank out everything here, easier than dealing
		 * with the various versions of the command
		 */
		frob_end = INT_MAX;
		break;
	case MGMT_MCAST_KEY:
		frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));

		frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
		BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
			     offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
		break;
	}

	if (frob_start >= frob_end)
		return;

	if (frob_end > len)
		frob_end = len;

	memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
}
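
/*
 * Overwrite the parts of a dumped memory window that intersect the
 * firmware image's dump-exclusion ranges.
 */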
static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
{
	const struct iwl_dump_exclude *excl;
	struct iwl_mvm *mvm = ctx;
	int i;

	switch (mvm->fwrt.cur_fw_img) {
	case IWL_UCODE_INIT:
	default:
		/* not relevant */
		return;
	case IWL_UCODE_REGULAR:
	case IWL_UCODE_REGULAR_USNIFFER:
		excl = mvm->fw->dump_excl;
		break;
	case IWL_UCODE_WOWLAN:
		excl = mvm->fw->dump_excl_wowlan;
		break;
	}

	BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
		     sizeof(mvm->fw->dump_excl_wowlan));

	for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
		u32 start, end;

		if (!excl[i].addr || !excl[i].size)
			continue;

		start = excl[i].addr;
		end = start + excl[i].size;

		if (end <= mem_addr || start >= mem_addr + buflen)
			continue;

		if (start < mem_addr)
			start = mem_addr;

		if (end > mem_addr + buflen)
			end = mem_addr + buflen;

		memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
	}
}

static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
	.frob_txf = iwl_mvm_frob_txf,
	.frob_hcmd = iwl_mvm_frob_hcmd,
	.frob_mem = iwl_mvm_frob_mem,
};

static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
{
	struct iwl_mvm *mvm = priv;
	struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;

	/*
	 * This is protected by the guarantee that this function will not be
	 * called twice on two different threads
	 */
	prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);

	curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
	if (!curr_conn_info)
		return;

	curr_conn_info->conn_info = *conn_info;

	rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);

	if (prev_conn_info)
		kfree_rcu(prev_conn_info, rcu_head);
}

static void iwl_mvm_mei_rfkill(void *priv, bool blocked,
			       bool csme_taking_ownership)
{
	struct iwl_mvm *mvm = priv;

	if (blocked && !csme_taking_ownership)
		return;

	mvm->mei_rfkill_blocked = blocked;
	if (!mvm->hw_registered)
		return;

	wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
					 mvm->mei_rfkill_blocked,
					 RFKILL_HARD_BLOCK_NOT_OWNER);
}
|
|
|
|
|
|
|
|
static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
|
|
|
|
{
|
|
|
|
struct iwl_mvm *mvm = priv;
|
|
|
|
|
|
|
|
if (!mvm->hw_registered || !mvm->csme_vif)
|
|
|
|
return;
|
|
|
|
|
|
|
|
iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
|
|
|
|
}
|
|
|
|
|
|
|
|
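/*
 * Worker scheduled once the SAP connection to CSME is established:
 * retry the NVM read and post-NVM setup that were deferred in
 * iwl_op_mode_mvm_start(), tearing everything down on failure.
 */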
static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, sap_connected_wk);
	int ret;

	ret = iwl_mvm_start_get_nvm(mvm);
	if (ret)
		goto out_free;

	ret = iwl_mvm_start_post_nvm(mvm);
	if (ret)
		goto out_free;

	return;

out_free:
	IWL_ERR(mvm, "Couldn't get started...\n");
	iwl_mei_start_unregister();
	iwl_mei_unregister_complete();
	iwl_fw_flush_dumps(&mvm->fwrt);
	iwl_mvm_thermal_exit(mvm);
	iwl_fw_runtime_free(&mvm->fwrt);
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(mvm->trans);
	kfree(mvm->nvm_data);
	kfree(mvm->mei_nvm_data);

	ieee80211_free_hw(mvm->hw);
}

static void iwl_mvm_mei_sap_connected(void *priv)
{
	struct iwl_mvm *mvm = priv;

	if (!mvm->hw_registered)
		schedule_work(&mvm->sap_connected_wk);
}

static void iwl_mvm_mei_nic_stolen(void *priv)
{
	struct iwl_mvm *mvm = priv;

	rtnl_lock();
	cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
	rtnl_unlock();
}

static const struct iwl_mei_ops mei_ops = {
	.me_conn_status = iwl_mvm_me_conn_status,
	.rfkill = iwl_mvm_mei_rfkill,
	.roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
	.sap_connected = iwl_mvm_mei_sap_connected,
	.nic_stolen = iwl_mvm_mei_nic_stolen,
};

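/* Iterator: trigger link selection on authorized MLD interfaces */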
static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
		iwl_mvm_select_links(mvmvif->mvm, vif);
}

static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
					struct wiphy_work *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, trig_link_selection_wk);

	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_find_link_selection_vif,
					    NULL);
}

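/*
 * Op mode start: allocate the mac80211 hw and MVM state, set up work
 * items and locks, configure the transport, and kick off NVM reading
 * and the rest of the initialization.
 */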
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	u32 max_agg;
	size_t scan_size;
	u32 min_backoff;
	struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;

	/*
	 * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
	 * index all over the driver - check that its value corresponds to the
	 * array size.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
		     IWL_MVM_STATION_COUNT_MAX);

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops :
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		max_agg = 512;
	else
		max_agg = IEEE80211_MAX_AMPDU_BUF_HE;

	hw->max_rx_aggregation_subframes = max_agg;

	if (cfg->max_tx_agg_size)
		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
	else
		hw->max_tx_aggregation_subframes = max_agg;

	op_mode = hw->priv;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
			    &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);

	iwl_mvm_get_bios_tables(mvm);
	iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
	iwl_uefi_get_step_table(trans);

	mvm->init_status = 0;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		op_mode->ops = &iwl_mvm_ops_mq;
		trans->rx_mpdu_cmd_hdr_size =
			(trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_AX210) ?
			sizeof(struct iwl_rx_mpdu_desc) :
			IWL_RX_DESC_SIZE_V1;
	} else {
		op_mode->ops = &iwl_mvm_ops;
		trans->rx_mpdu_cmd_hdr_size =
			sizeof(struct iwl_rx_mpdu_res_start);

		if (WARN_ON(trans->num_rx_queues > 1))
			goto out_free;
	}

	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If we have the new TX/queue allocation API initialize them
		 * all to invalid numbers. We'll rewrite the ones that we need
		 * later, but that doesn't happen for all of them all of the
		 * time (e.g. P2P Device is optional), and if a dynamic queue
		 * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
		 * iwl_mvm_is_static_queue() erroneously returns true, and we
		 * might have things getting stuck.
		 */
		mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
		mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
		mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
		mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
	} else {
		mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
		mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
		mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
		mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
	}

	mvm->sf_state = SF_UNINIT;
	if (iwl_mvm_has_unified_ucode(mvm))
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
	else
		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
	mvm->drop_bcn_ap_mode = true;

	mutex_init(&mvm->mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);
	INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
	INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
	INIT_LIST_HEAD(&mvm->resp_pasn_list);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
	INIT_LIST_HEAD(&mvm->add_stream_txqs);
	spin_lock_init(&mvm->add_stream_lock);

	wiphy_work_init(&mvm->async_handlers_wiphy_wk,
			iwl_mvm_async_handlers_wiphy_wk);

	wiphy_work_init(&mvm->trig_link_selection_wk,
			iwl_mvm_trig_link_selection);

	init_waitqueue_head(&mvm->rx_sync_waitq);

	mvm->queue_sync_state = 0;

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	spin_lock_init(&mvm->tcm.lock);
	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	mvm->tcm.uapsd_nonagg_ts = jiffies;

	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);

	mvm->cmd_ver.range_resp =
		iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
					TOF_RANGE_RESPONSE_NOTIF, 5);
	/* we only support up to version 9 */
	if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
		goto out_free;

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_4K:
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
		break;
	case IWL_AMSDU_8K:
		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
		break;
	case IWL_AMSDU_12K:
		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
		break;
	default:
		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
		       iwlwifi_mod_params.amsdu_size);
		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
	}

	trans->wide_cmd_header = true;
	trans_cfg.bc_table_dword =
		mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;

	trans_cfg.command_groups = iwl_mvm_groups;
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);

	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
	trans_cfg.scd_set_active = true;

	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
					  driver_data[2]);

	/* Set a short watchdog for the command queue */
	trans_cfg.cmd_q_wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%.31s", fw->fw_version);

	trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
						   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);

	trans_cfg.queue_alloc_cmd_ver =
		iwl_fw_lookup_cmd_ver(mvm->fw,
				      WIDE_ID(DATA_PATH_GROUP,
					      SCD_QUEUE_CONFIG_CMD),
				      0);
	mvm->sta_remove_requires_queue_remove =
		trans_cfg.queue_alloc_cmd_ver > 0;

	mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
	trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
	memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
	       sizeof(trans->dbg.conf_tlv));
	trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;

	trans->iml = mvm->fw->iml;
	trans->iml_len = mvm->fw->iml_len;

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->trans->name, mvm->trans->hw_rev);

	if (iwlwifi_mod_params.nvm_file)
		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
	else
		IWL_DEBUG_EEPROM(mvm->trans->dev,
				 "working without external nvm file\n");

	scan_size = iwl_mvm_scan_size(mvm);

	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;
	mvm->scan_cmd_size = scan_size;

	/* invalidate ids to prevent accidental removal of sta_id 0 */
	mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
	mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	mvm->last_ebs_successful = true;

	min_backoff = iwl_mvm_min_backoff(mvm);
	iwl_mvm_thermal_initialize(mvm, min_backoff);

	if (!iwl_mvm_has_new_rx_stats_api(mvm))
		memset(&mvm->rx_stats_v3, 0,
		       sizeof(struct mvm_statistics_rx_v3));
	else
		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));

	iwl_mvm_ftm_initiator_smooth_config(mvm);

	iwl_mvm_init_time_sync(&mvm->time_sync);

	mvm->debugfs_dir = dbgfs_dir;

	mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);

	iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);

	if (iwl_mvm_start_get_nvm(mvm)) {
		/*
		 * Getting NVM failed while CSME is the owner, but we are
		 * registered to MEI, we'll get the NVM later when it'll be
		 * possible to get it from CSME.
		 */
		if (trans->csme_own && mvm->mei_registered)
			return op_mode;

		goto out_thermal_exit;
	}

	if (iwl_mvm_start_post_nvm(mvm))
		goto out_thermal_exit;

	return op_mode;

out_thermal_exit:
	iwl_mvm_thermal_exit(mvm);
	if (mvm->mei_registered) {
		iwl_mei_start_unregister();
		iwl_mei_unregister_complete();
	}
out_free:
	iwl_fw_flush_dumps(&mvm->fwrt);
	iwl_fw_runtime_free(&mvm->fwrt);

	if (iwlmvm_mod_params.init_dbg)
		return op_mode;
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	iwl_trans_op_mode_leave(trans);

	ieee80211_free_hw(mvm->hw);
	return NULL;
}

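/* Stop the device and release firmware resources; needs mvm->mutex held */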
void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_fw_cancel_timestamp(&mvm->fwrt);

	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	iwl_free_fw_paging(&mvm->fwrt);
	iwl_fw_dump_conf_clear(&mvm->fwrt);
	iwl_mvm_mei_device_state(mvm, false);
}

static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	if (mvm->mei_registered) {
		rtnl_lock();
		iwl_mei_set_netdev(NULL);
		rtnl_unlock();
		iwl_mei_start_unregister();
	}

	/*
	 * After we unregister from mei, the worker can't be scheduled
	 * anymore.
	 */
	cancel_work_sync(&mvm->sap_connected_wk);

	iwl_mvm_leds_exit(mvm);

	iwl_mvm_thermal_exit(mvm);

	/*
	 * If we couldn't get ownership on the device and we couldn't
	 * get the NVM from CSME, we haven't registered to mac80211.
	 * In that case, we didn't fail op_mode_start, because we are
	 * waiting for CSME to allow us to get the NVM to register to
	 * mac80211. If that didn't happen, we haven't registered to
	 * mac80211, hence the if below.
	 */
	if (mvm->hw_registered)
		ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = NULL;

	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	iwl_mvm_ptp_remove(mvm);

	iwl_trans_op_mode_leave(mvm->trans);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->nvm_data);
	kfree(mvm->mei_nvm_data);
	kfree(rcu_access_pointer(mvm->csme_conn_info));
	kfree(mvm->temp_nvm_data);
	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);
	kfree(mvm->acs_survey);

	cancel_delayed_work_sync(&mvm->tcm.work);

	iwl_fw_runtime_free(&mvm->fwrt);
	mutex_destroy(&mvm->mutex);

	if (mvm->mei_registered)
		iwl_mei_unregister_complete();

	ieee80211_free_hw(mvm->hw);
}

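/*
 * Rx notifications that can't be handled right away are queued as
 * entries on mvm->async_handlers_list and run later from a worker,
 * in the locking context the handler declared.
 */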
struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	enum iwl_rx_handler_context context;
	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

/*
 * This function receives a bitmap of rx async handler contexts
 * (&iwl_rx_handler_context) to handle, and runs only them
 */
static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
					      u8 contexts)
{
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(local_list);

	/*
	 * Sync with Rx path with a lock. Remove all the entries of the
	 * wanted contexts from this list, add them to a local one (lock free),
	 * and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		if (!(BIT(entry->context) & contexts))
			continue;
		list_del(&entry->list);
		list_add_tail(&entry->list, &local_list);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
			mutex_lock(&mvm->mutex);
		entry->fn(mvm, &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
			mutex_unlock(&mvm->mutex);
		kfree(entry);
	}
}

static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
					    struct wiphy_work *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
	u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);

	iwl_mvm_async_handlers_by_context(mvm, contexts);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
		      BIT(RX_HANDLER_ASYNC_UNLOCKED);

	iwl_mvm_async_handlers_by_context(mvm, contexts);
}

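/*
 * Collect firmware debug data if an FW_DBG_TRIGGER_FW_NOTIF trigger
 * matches the command/notification that was just received.
 */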
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
					    struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
				     FW_DBG_TRIGGER_FW_NOTIF);
	if (!trig)
		return;

	cmds_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
		/* don't collect on CMD 0 */
		if (!cmds_trig->cmds[i].cmd_id)
			break;

		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"CMD 0x%02x.%02x received",
					pkt->hdr.group_id, pkt->hdr.cmd);
		break;
	}
}

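/*
 * Common Rx dispatch: notify waiters, then match the packet against
 * iwl_mvm_rx_handlers and run the handler synchronously or queue it
 * for asynchronous handling depending on its declared context.
 */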
static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
			      struct iwl_rx_cmd_buffer *rxb,
			      struct iwl_rx_packet *pkt)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	int i;
	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

	iwl_dbg_tlv_time_point(&mvm->fwrt,
			       IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
	iwl_mvm_rx_check_trigger(mvm, pkt);

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size,
				 "unexpected notification 0x%04x size %d, need %d\n",
				 rx_h->cmd_id, pkt_len, rx_h->min_size))
			return;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mvm, rxb);
			return;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		entry->context = rx_h->context;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
			wiphy_work_queue(mvm->hw->wiphy,
					 &mvm->async_handlers_wiphy_wk);
		else
			schedule_work(&mvm->async_handlers_wk);
		break;
	}
}

static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
		       struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
		   struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
		iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
		iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
	else
		iwl_mvm_rx_common(mvm, rxb, pkt);
}

static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
		queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
}

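/*
 * Propagate a HW queue stop/wake to the mac80211 TXQs mapped to it.
 * For the statically-allocated (shared) queues, all queues are
 * stopped or woken instead.
 */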
static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
				       int hw_queue, bool start)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_sta *sta;
	struct ieee80211_txq *txq;
	struct iwl_mvm_txq *mvmtxq;
	int i;
	unsigned long tid_bitmap;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;

	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
		mvm->tvqm_info[hw_queue].sta_id :
		mvm->queue_info[hw_queue].ra_sta_id;

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (IS_ERR_OR_NULL(sta))
		goto out;
	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
		if (!start)
			ieee80211_stop_queues(mvm->hw);
		else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
			ieee80211_wake_queues(mvm->hw);

		goto out;
	}

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tid = mvm->tvqm_info[hw_queue].txq_tid;

		tid_bitmap = BIT(tid);
	} else {
		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
	}

	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int tid = i;

		if (tid == IWL_MAX_TID_COUNT)
			tid = IEEE80211_NUM_TIDS;

		txq = sta->txq[tid];
		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
		if (start)
			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
		else
			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);

		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
			local_bh_disable();
			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
			local_bh_enable();
		}
	}

out:
	rcu_read_unlock();
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}

static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
	wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
				  iwl_mvm_is_radio_killed(mvm));
}

void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
	if (state)
		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);

	iwl_mvm_set_rfkill_state(mvm);
}

struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
{
	return rcu_dereference_protected(mvm->csme_conn_info,
					 lockdep_is_held(&mvm->mutex));
}

static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
	bool unified = iwl_mvm_has_unified_ucode(mvm);

	if (state) {
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
		wake_up(&mvm->rx_sync_waitq);
	} else {
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	}

	iwl_mvm_set_rfkill_state(mvm);

	/* iwl_run_init_mvm_ucode is waiting for results, abort it. */
	if (rfkill_safe_init_done)
		iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * Don't ask the transport to stop the firmware. We'll do it
	 * after cfg80211 takes us down.
	 */
	if (unified)
		return false;

	/*
	 * Stop the device if we run OPERATIONAL firmware or if we are in the
	 * middle of the calibrations.
	 */
	return state && rfkill_safe_init_done;
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

struct iwl_mvm_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void iwl_mvm_reprobe_wk(struct work_struct *wk)
{
	struct iwl_mvm_reprobe *reprobe;

	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}

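/*
 * Handle a firmware error: abort waiters and scans, collect debug
 * data, and depending on the current state either reprobe the device
 * or ask mac80211 for a HW restart.
 */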
|
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
2013-01-24 14:25:36 +01:00
|
|
|
{
|
|
|
|
iwl_abort_notification_waits(&mvm->notif_wait);
|
2019-07-01 16:03:48 +03:00
|
|
|
iwl_dbg_tlv_del_timers(mvm->trans);
|
2013-01-24 14:25:36 +01:00
|
|
|
|
2014-01-09 14:22:55 +02:00
|
|
|
/*
|
|
|
|
* This is a bit racy, but worst case we tell mac80211 about
|
|
|
|
* a stopped/aborted scan when that was already done which
|
|
|
|
* is not a problem. It is necessary to abort any os scan
|
|
|
|
* here because mac80211 requires having the scan cleared
|
|
|
|
* before restarting.
|
|
|
|
* We'll reset the scan_status to NONE in restart cleanup in
|
|
|
|
* the next start() call from mac80211. If restart isn't called
|
|
|
|
* (no fw restart) scan status will stay busy.
|
|
|
|
*/
|
2015-03-10 10:06:02 +02:00
|
|
|
iwl_mvm_report_scan_aborted(mvm);
|
2014-01-09 14:22:55 +02:00
|
|
|
|
2013-01-24 14:25:36 +01:00
|
|
|
/*
|
|
|
|
* If we're restarting already, don't cycle restarts.
|
|
|
|
* If INIT fw asserted, it will likely fail again.
|
|
|
|
* If WoWLAN fw asserted, don't restart either, mac80211
|
|
|
|
* can't recover this since we're already half suspended.
|
|
|
|
*/
|
2017-05-30 16:45:31 +02:00
|
|
|
if (!mvm->fw_restart && fw_error) {
|
2021-08-02 17:09:39 +03:00
|
|
|
iwl_fw_error_collect(&mvm->fwrt, false);
|
2021-11-10 15:01:59 +02:00
|
|
|
} else if (test_bit(IWL_MVM_STATUS_STARTING,
|
|
|
|
&mvm->status)) {
|
|
|
|
IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
|
	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_reprobe *reprobe;

		IWL_ERR(mvm,
			"Firmware error during reconfiguration - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(mvm, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = get_device(mvm->trans->dev);
		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
		schedule_work(&reprobe->work);
	} else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			    &mvm->status)) {
		IWL_ERR(mvm, "HW restart already requested, but not started\n");
	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
		   mvm->hw_registered &&
		   !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
		/* This should be first thing before trying to collect any
		 * data to avoid endless loops if any HW error happens while
		 * collecting debug data.
		 */
		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);

		if (mvm->fw->ucode_capa.error_log_size) {
			u32 src_size = mvm->fw->ucode_capa.error_log_size;
			u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
			u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);

			if (recover_buf) {
				mvm->error_recovery_buf = recover_buf;
				iwl_trans_read_mem_bytes(mvm->trans,
							 src_addr,
							 recover_buf,
							 src_size);
			}
		}

		iwl_fw_error_collect(&mvm->fwrt, false);

		if (fw_error && mvm->fw_restart > 0) {
			mvm->fw_restart--;
			ieee80211_restart_hw(mvm->hw);
		} else if (mvm->fwrt.trans->dbg.restart_required) {
			IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
			mvm->fwrt.trans->dbg.restart_required = false;
			ieee80211_restart_hw(mvm->hw);
		} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
			ieee80211_restart_hw(mvm->hw);
		}
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
	    !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
				&mvm->status))
		iwl_mvm_dump_nic_error_log(mvm);

	if (sync) {
		iwl_fw_error_collect(&mvm->fwrt, true);
		/*
		 * Currently, the only case for sync=true is during
		 * shutdown, so just stop in this case. If/when that
		 * changes, we need to be a bit smarter here.
		 */
		return;
	}

	/*
	 * If the firmware crashes while we're already considering it
	 * to be dead then don't ask for a restart, that cannot do
	 * anything useful anyway.
	 */
	if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
		return;

	iwl_mvm_nic_restart(mvm, false);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm, true);
}

static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
				       enum iwl_fw_ini_time_point tp_id,
				       union iwl_dbg_tlv_tp_data *tp_data)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}

#define IWL_MVM_COMMON_OPS					\
	/* these could be differentiated */			\
	.queue_full = iwl_mvm_stop_sw_queue,			\
	.queue_not_full = iwl_mvm_wake_sw_queue,		\
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
	.free_skb = iwl_mvm_free_skb,				\
	.nic_error = iwl_mvm_nic_error,				\
	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
	.nic_config = iwl_mvm_nic_config,			\
	/* as we only register one, these MUST be common! */	\
	.start = iwl_op_mode_mvm_start,				\
	.stop = iwl_op_mode_mvm_stop,				\
	.time_point = iwl_op_mode_mvm_time_point

static const struct iwl_op_mode_ops iwl_mvm_ops = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx,
};

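/*
 * Rx dispatch for the non-default RSS queues; these only carry MPDUs,
 * frame-release and queue-sync notifications.
 */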
static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
			      struct napi_struct *napi,
			      struct iwl_rx_cmd_buffer *rxb,
			      unsigned int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (unlikely(queue >= mvm->trans->num_rx_queues))
		return;

	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
					 RX_QUEUES_NOTIFICATION)))
		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}

static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
	IWL_MVM_COMMON_OPS,
	.rx = iwl_mvm_rx_mq,
	.rx_rss = iwl_mvm_rx_mq_rss,
};