2020-12-10 00:06:03 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
|
|
|
/*
|
2024-02-05 21:21:01 +02:00
|
|
|
* Copyright (C) 2012-2014, 2018-2024 Intel Corporation
|
2020-12-10 00:06:03 +02:00
|
|
|
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
|
|
|
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
|
|
|
*/
|
2015-09-03 14:56:10 +02:00
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include "iwl-trans.h"
|
|
|
|
#include "mvm.h"
|
|
|
|
#include "fw-api.h"
|
2023-03-20 12:33:04 +02:00
|
|
|
#include "time-sync.h"
|
2015-09-03 14:56:10 +02:00
|
|
|
|
2015-12-06 14:58:08 +02:00
|
|
|
/*
 * iwl_mvm_check_pn - replay (PN) check for hw-decrypted frames on RSS queues
 *
 * Returns 0 if the frame passes (or is exempt from) the PN check, -1 if it
 * must be dropped as a replay. On success, records the new PN for this
 * queue/TID and marks the RX status with RX_FLAG_PN_VALIDATED so mac80211
 * skips its own check.
 */
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	int res;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_DEBUG_DROP(mvm,
			       "expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* extiv is the CCMP/GCMP header right after the 802.11 header;
	 * the key index lives in the top two bits of its 4th byte
	 */
	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn - extiv stores the PN little-endian-ish across bytes
	 * 7,6,5,4,1,0; reorder into big-endian for memcmp ordering
	 */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
	if (res < 0)
		return -1;
	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
|
|
|
|
|
|
|
|
/* iwl_mvm_create_skb Adds the rxb to a new skb
 *
 * Copies the 802.11 header (plus crypto IV) into the skb head, strips the
 * firmware's DWORD pad and the MIC/CRC trailer, validates a hardware
 * CHECKSUM_COMPLETE value, and attaches any remaining payload as a page
 * fragment (stealing the rx buffer page). Returns 0 on success or -EINVAL
 * if the reported lengths are inconsistent.
 */
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
			      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* field is in words (hence << 1): CRC + MIC length to strip */
	u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
				     IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		len -= 2;
		pad_len = 2;
	}

	/*
	 * For non monitor interface strip the bytes the RADA might not have
	 * removed (it might be disabled, e.g. for mgmt frames). As a monitor
	 * interface cannot exist with other interfaces, this removal is safe
	 * and sufficient, in monitor mode there's no decryption being done.
	 */
	if (len > mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS))
		len -= mic_crc_len;

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len :
					       hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;

	if (unlikely(headlen < hdrlen))
		return -EINVAL;

	/* Since data doesn't move data while putting data on skb and that is
	 * the only way we use, data + len is the next place that hdr would be put
	 */
	skb_set_mac_header(skb, skb->len);
	skb_put_data(skb, hdr, hdrlen);
	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

	/*
	 * If we did CHECKSUM_COMPLETE, the hardware only does it right for
	 * certain cases and starts the checksum after the SNAP. Check if
	 * this is the case - it's easier to just bail out to CHECKSUM_NONE
	 * in the cases the hardware didn't handle, since it's rare to see
	 * such packets, even though the hardware did calculate the checksum
	 * in this case, just starting after the MAC header instead.
	 *
	 * Starting from Bz hardware, it calculates starting directly after
	 * the MAC header, so that matches mac80211's expectation.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		struct {
			u8 hdr[6];
			__be16 type;
		} __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);

		if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
			     !ether_addr_equal(shdr->hdr, rfc1042_header) ||
			     (shdr->type != htons(ETH_P_IP) &&
			      shdr->type != htons(ETH_P_ARP) &&
			      shdr->type != htons(ETH_P_IPV6) &&
			      shdr->type != htons(ETH_P_8021Q) &&
			      shdr->type != htons(ETH_P_PAE) &&
			      shdr->type != htons(ETH_P_TDLS))))
			skb->ip_summed = CHECKSUM_NONE;
		else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
			/* mac80211 assumes full CSUM including SNAP header */
			skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
	}

	fraglen = len - headlen;

	if (fraglen) {
		int offset = (u8 *)hdr + headlen + pad_len -
			     (u8 *)rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	return 0;
}
|
2015-09-03 14:56:10 +02:00
|
|
|
|
2023-03-05 14:16:19 +02:00
|
|
|
/* put a TLV on the skb and return data pointer
|
|
|
|
*
|
|
|
|
* Also pad to 4 the len and zero out all data part
|
|
|
|
*/
|
|
|
|
static void *
|
|
|
|
iwl_mvm_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
|
|
|
|
{
|
|
|
|
struct ieee80211_radiotap_tlv *tlv;
|
|
|
|
|
|
|
|
tlv = skb_put(skb, sizeof(*tlv));
|
|
|
|
tlv->type = cpu_to_le16(type);
|
|
|
|
tlv->len = cpu_to_le16(len);
|
|
|
|
return skb_put_zero(skb, ALIGN(len, 4));
|
|
|
|
}
|
|
|
|
|
2018-11-20 17:58:46 +01:00
|
|
|
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
|
2023-03-05 14:16:19 +02:00
|
|
|
struct ieee80211_radiotap_vendor_content *radiotap;
|
2023-03-01 12:09:35 +02:00
|
|
|
const u16 vendor_data_len = sizeof(mvm->cur_aid);
|
2018-11-20 17:58:46 +01:00
|
|
|
|
|
|
|
if (!mvm->cur_aid)
|
|
|
|
return;
|
|
|
|
|
2023-03-05 14:16:19 +02:00
|
|
|
radiotap = iwl_mvm_radiotap_put_tlv(skb,
|
|
|
|
IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
|
|
|
|
sizeof(*radiotap) + vendor_data_len);
|
2018-12-10 10:36:29 +01:00
|
|
|
|
2018-11-20 17:58:46 +01:00
|
|
|
/* Intel OUI */
|
2023-03-05 14:16:19 +02:00
|
|
|
radiotap->oui[0] = 0xf6;
|
|
|
|
radiotap->oui[1] = 0x54;
|
|
|
|
radiotap->oui[2] = 0x25;
|
2018-11-20 17:58:46 +01:00
|
|
|
/* radiotap sniffer config sub-namespace */
|
2023-03-05 14:16:19 +02:00
|
|
|
radiotap->oui_subtype = 1;
|
|
|
|
radiotap->vendor_type = 0;
|
|
|
|
|
2018-11-20 17:58:46 +01:00
|
|
|
/* fill the data now */
|
2023-03-05 14:16:19 +02:00
|
|
|
memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
|
2018-11-20 17:58:46 +01:00
|
|
|
|
2023-03-01 12:09:35 +02:00
|
|
|
rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
|
2018-11-20 17:58:46 +01:00
|
|
|
}
|
|
|
|
|
2015-12-06 14:58:08 +02:00
|
|
|
/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
|
|
|
|
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
|
|
|
struct napi_struct *napi,
|
|
|
|
struct sk_buff *skb, int queue,
|
2024-03-20 23:26:22 +02:00
|
|
|
struct ieee80211_sta *sta)
|
2015-12-06 14:58:08 +02:00
|
|
|
{
|
2023-03-29 10:05:21 +03:00
|
|
|
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
|
2015-12-06 14:58:08 +02:00
|
|
|
kfree_skb(skb);
|
2023-03-29 10:05:21 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
|
2015-09-03 14:56:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
|
2018-02-05 12:54:36 +02:00
|
|
|
struct ieee80211_rx_status *rx_status,
|
|
|
|
u32 rate_n_flags, int energy_a,
|
|
|
|
int energy_b)
|
2015-09-03 14:56:10 +02:00
|
|
|
{
|
2018-02-05 12:54:36 +02:00
|
|
|
int max_energy;
|
|
|
|
u32 rate_flags = rate_n_flags;
|
2015-09-03 14:56:10 +02:00
|
|
|
|
|
|
|
energy_a = energy_a ? -energy_a : S8_MIN;
|
|
|
|
energy_b = energy_b ? -energy_b : S8_MIN;
|
|
|
|
max_energy = max(energy_a, energy_b);
|
|
|
|
|
2016-01-26 12:35:13 +02:00
|
|
|
IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
|
|
|
|
energy_a, energy_b, max_energy);
|
2015-09-03 14:56:10 +02:00
|
|
|
|
|
|
|
rx_status->signal = max_energy;
|
2017-10-30 17:38:43 +02:00
|
|
|
rx_status->chains =
|
|
|
|
(rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
|
2015-09-03 14:56:10 +02:00
|
|
|
rx_status->chain_signal[0] = energy_a;
|
|
|
|
rx_status->chain_signal[1] = energy_b;
|
|
|
|
}
|
|
|
|
|
2021-03-26 12:57:23 +02:00
|
|
|
/*
 * iwl_mvm_rx_mgmt_prot - handle firmware beacon-protection results
 *
 * Called for management frames without the protected bit. Returns 0 when
 * the frame should continue normal processing (non-protected security
 * status, non-beacon, or firmware validated the MIC with no replay), and
 * -1 when the frame must be dropped. Dropped beacons are also reported to
 * userspace via cfg80211_rx_unprot_mlme_mgmt where possible.
 */
static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
				struct ieee80211_hdr *hdr,
				struct iwl_rx_mpdu_desc *desc,
				u32 status,
				struct ieee80211_rx_status *stats)
{
	struct wireless_dev *wdev;
	struct iwl_mvm_sta *mvmsta;
	struct iwl_mvm_vif *mvmvif;
	u8 keyid;
	struct ieee80211_key_conf *key;
	u32 len = le16_to_cpu(desc->mpdu_len);
	const u8 *frame = (void *)hdr;

	if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/*
	 * For non-beacon, we don't really care. But beacons may
	 * be filtered out, and we thus need the firmware's replay
	 * detection, otherwise beacons the firmware previously
	 * filtered could be replayed, or something like that, and
	 * it can filter a lot - though usually only if nothing has
	 * changed.
	 */
	if (!ieee80211_is_beacon(hdr->frame_control))
		return 0;

	if (!sta)
		return -1;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	/* key mismatch - will also report !MIC_OK but we shouldn't count it */
	if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
		goto report;

	/* good cases */
	if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
		   !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	}

	/*
	 * both keys will have the same cipher and MIC length, use
	 * whichever one is available
	 */
	key = rcu_dereference(mvmvif->bcn_prot.keys[0]);
	if (!key) {
		key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
		if (!key)
			goto report;
	}

	/* frame must at least hold the MME (key ID, PN, MIC) */
	if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
		goto report;

	/* get the real key ID */
	keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
	/* and if that's the other key, look it up */
	if (keyid != key->keyidx) {
		/*
		 * shouldn't happen since firmware checked, but be safe
		 * in case the MIC length is wrong too, for example
		 */
		if (keyid != 6 && keyid != 7)
			return -1;
		key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
		if (!key)
			goto report;
	}

	/* Report status to mac80211 */
	if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
		ieee80211_key_mic_failure(key);
	else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
		ieee80211_key_replay(key);
report:
	wdev = ieee80211_vif_to_wdev(mvmsta->vif);
	if (wdev->netdev)
		cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);

	return -1;
}
|
|
|
|
|
|
|
|
/*
 * iwl_mvm_rx_crypto - interpret the firmware's decryption status
 *
 * Translates the security bits in the RX descriptor status into mac80211
 * RX flags (decrypted / MIC stripped / ICV stripped / MMIC error) and sets
 * *crypt_len to the size of the crypto header still present in the frame.
 * Returns 0 to continue processing or -1 to drop the frame.
 */
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			     struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats, u16 phy_info,
			     struct iwl_rx_mpdu_desc *desc,
			     u32 pkt_flags, int queue, u8 *crypt_len)
{
	u32 status = le32_to_cpu(desc->status);

	/*
	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
	 * (where we don't have the keys).
	 * We limit this to aggregation because in TKIP this is a valid
	 * scenario, since we may not have the (correct) TTAK (phase 1
	 * key) in the firmware.
	 */
	if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) {
		IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n");
		return -1;
	}

	/* unprotected mgmt frames may still be beacon-protected by fw */
	if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
		     !ieee80211_has_protected(hdr->frame_control)))
		return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
			IWL_DEBUG_DROP(mvm,
				       "Dropping packet, bad MIC (CCM/GCM)\n");
			return -1;
		}

		stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		if (mvm->trans->trans_cfg->gen2 &&
		    !(status & RX_MPDU_RES_STATUS_MIC_OK))
			stats->flag |= RX_FLAG_MMIC_ERROR;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		fallthrough;
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		/* TKIP falls through here, so only set WEP IV length if
		 * this really is WEP
		 */
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
		    IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;

		if (pkt_flags & FH_RSCSR_RADA_EN) {
			stats->flag |= RX_FLAG_ICV_STRIPPED;
			if (mvm->trans->trans_cfg->gen2)
				stats->flag |= RX_FLAG_MMIC_STRIPPED;
		}

		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
		break;
	default:
		/*
		 * Sometimes we can get frames that were not decrypted
		 * because the firmware didn't have the keys yet. This can
		 * happen after connection where we can get multicast frames
		 * before the GTK is installed.
		 * Silently drop those frames.
		 * Also drop un-decrypted frames in monitor mode.
		 */
		if (!is_multicast_ether_addr(hdr->addr1) &&
		    !mvm->monitor_on && net_ratelimit())
			IWL_WARN(mvm, "Unhandled alg: 0x%x\n", status);
	}

	return 0;
}
|
|
|
|
|
2020-09-26 00:30:50 +03:00
|
|
|
/*
 * Propagate the hardware's RX checksum offload result to the skb.
 *
 * On AX210 and later devices the hardware reports a raw full-packet
 * checksum (desc->v3.raw_xsum) when the RPA flag is set, which maps to
 * CHECKSUM_COMPLETE.  On older devices only per-protocol "checksum OK"
 * flags are available, which map to CHECKSUM_UNNECESSARY when the
 * interface enabled NETIF_F_RXCSUM.
 */
static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_packet *pkt)
{
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* raw_xsum is only valid when the RPA bit is set */
		if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
			u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);

			skb->ip_summed = CHECKSUM_COMPLETE;
			/* HW reports the ones'-complement sum inverted */
			skb->csum = csum_unfold(~(__force __sum16)hwsum);
		}
	} else {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_vif *mvmvif;
		u16 flags = le16_to_cpu(desc->l3l4_flags);
		u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
				  IWL_RX_L3_PROTO_POS);

		mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

		/*
		 * Accept the HW verdict only if TCP/UDP checksum passed and
		 * either the IP header checksum passed or the L3 protocol is
		 * IPv6 (which has no header checksum).
		 */
		if (mvmvif->features & NETIF_F_RXCSUM &&
		    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
		    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
|
|
|
|
|
2015-12-07 12:50:58 +02:00
|
|
|
/*
 * returns true if a packet is a duplicate or invalid tid and should be dropped.
 * Updates AMSDU PN tracking info
 *
 * Per-queue duplicate detection: each RX queue keeps the last seen
 * sequence control and A-MSDU sub-frame index per TID in
 * mvm_sta->dup_data[queue].
 */
static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
			   struct ieee80211_rx_status *rx_status,
			   struct ieee80211_hdr *hdr,
			   struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (WARN_ON_ONCE(!mvm_sta->dup_data))
		return false;

	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_any_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return false;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		/* frame has qos control */
		tid = ieee80211_get_tid(hdr);
		/* out-of-range TID: tell the caller to drop the frame */
		if (tid >= IWL_MAX_TID_COUNT)
			return true;
	} else {
		/* non-QoS data is tracked in the extra slot */
		tid = IWL_MAX_TID_COUNT;
	}

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info &
		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	/* retry bit set with the same seq/sub-frame already seen: duplicate */
	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	/* Allow same PN as the first subframe for following sub frames */
	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;

	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	/* tell mac80211 we already performed duplicate detection */
	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}
|
|
|
|
|
2016-03-23 16:32:02 +02:00
|
|
|
/*
 * Release all buffered frames up to (but not including) @nssn from the
 * per-queue reorder buffer to mac80211, advancing head_sn to @nssn.
 *
 * Caller must hold reorder_buf->lock.
 */
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta,
				   struct napi_struct *napi,
				   struct iwl_mvm_baid_data *baid_data,
				   struct iwl_mvm_reorder_buffer *reorder_buf,
				   u16 nssn)
{
	/* this queue's slice of the shared entries array */
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	lockdep_assert_held(&reorder_buf->lock);

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % reorder_buf->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/*
		 * Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}
|
|
|
|
|
|
|
|
/*
 * Handle a delBA notification on one RX queue: flush this queue's entire
 * reorder buffer for the given BAID to the stack before the BA session
 * data is torn down.
 */
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	/* head_sn + buf_size covers the whole window, so everything drains */
	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size));
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}
|
|
|
|
|
2019-06-24 13:57:34 +03:00
|
|
|
/*
 * Handle a firmware frame-release notification: release buffered frames
 * for @baid on this RX queue up to the notified NSSN.
 */
static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	struct iwl_mvm_baid_data *ba_data;
	u32 sta_id;

	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mvm->baid_map)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN(!ba_data, "BAID %d not found in map\n", baid))
		goto out;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, napi, ba_data,
			       reorder_buf, nssn);
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}
|
|
|
|
|
|
|
|
/*
 * Handle an internal RXQ sync notification echoed back by the firmware.
 *
 * Validates the payload length, checks the sync cookie against the current
 * one (stale messages are ignored), dispatches by notification type, and
 * finally acknowledges the sync by clearing this queue's bit in
 * queue_sync_state, waking the waiter once all queues responded.
 */
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
			    struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;
	u32 len = iwl_rx_packet_payload_len(pkt);

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif),
		      "invalid notification size %d (%d)",
		      len, (int)(sizeof(*notif) + sizeof(*internal_notif))))
		return;
	/* from here on, len is the size of the type-specific payload only */
	len -= sizeof(*notif) + sizeof(*internal_notif);

	if (WARN_ONCE(internal_notif->sync &&
		      mvm->queue_sync_cookie != internal_notif->cookie,
		      "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
		      internal_notif->cookie, mvm->queue_sync_cookie, queue))
		return;

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		/* empty notifications must carry no extra payload */
		WARN_ONCE(len, "invalid empty notification size %d", len);
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data),
			      "invalid delba notification size %d (%d)",
			      len, (int)sizeof(struct iwl_mvm_delba_data)))
			break;
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
	}

	if (internal_notif->sync) {
		WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state),
			  "queue sync: queue %d responded a second time!\n",
			  queue);
		/* last queue to respond wakes the sync waiter */
		if (READ_ONCE(mvm->queue_sync_state) == 0)
			wake_up(&mvm->rx_sync_waitq);
	}
}
|
|
|
|
|
2016-03-23 16:32:02 +02:00
|
|
|
/*
 * Returns true if the MPDU was buffered\dropped, false if it should be passed
 * to upper layer.
 *
 * Core RX reordering: frames belonging to a BA session (identified by the
 * BAID from the reorder data) are either released immediately, stored in
 * the per-queue reorder buffer, or dropped (duplicates / old SNs).
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	u8 tid = ieee80211_get_tid(hdr);
	u8 sub_frame_idx = desc->amsdu_info &
			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	struct iwl_mvm_reorder_buf_entry *entries;
	u32 sta_mask;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	/* 9000 family: driver-side reordering is not used here */
	if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
		return false;

	/*
	 * This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	/* no sta yet */
	if (WARN_ONCE(IS_ERR_OR_NULL(sta),
		      "Got valid BAID without a valid station assigned\n"))
		return false;

	/* not a data packet or a bar */
	if (!ieee80211_is_back_req(hdr->frame_control) &&
	    (!ieee80211_is_data_qos(hdr->frame_control) ||
	     is_multicast_ether_addr(hdr->addr1)))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder);
		return false;
	}

	rcu_read_lock();
	sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
	rcu_read_unlock();

	/* firmware consistency check: the BAID must match this STA/TID */
	if (IWL_FW_CHECK(mvm,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	spin_lock_bh(&buffer->lock);

	/*
	 * First frame on this queue since the session started: an old-SN
	 * frame can't initialize the buffer, pass it through instead.
	 */
	if (!buffer->valid) {
		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
			spin_unlock_bh(&buffer->lock);
			return false;
		}
		buffer->valid = true;
	}

	/* drop any duplicated packets */
	if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE))
		goto drop;

	/* drop any outdated packets */
	if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/*
	 * release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);

		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/* put in reorder buffer */
	index = sn % buffer->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	if (amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = sub_frame_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
				       buffer, nssn);

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}
|
|
|
|
|
2017-02-02 12:51:39 +02:00
|
|
|
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
|
|
|
|
u32 reorder_data, u8 baid)
|
2016-03-20 16:23:41 +02:00
|
|
|
{
|
|
|
|
unsigned long now = jiffies;
|
|
|
|
unsigned long timeout;
|
|
|
|
struct iwl_mvm_baid_data *data;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
|
|
|
data = rcu_dereference(mvm->baid_map[baid]);
|
2017-02-02 12:51:39 +02:00
|
|
|
if (!data) {
|
2017-07-27 15:34:12 +03:00
|
|
|
IWL_DEBUG_RX(mvm,
|
|
|
|
"Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
|
|
|
|
baid, reorder_data);
|
2016-03-20 16:23:41 +02:00
|
|
|
goto out;
|
2017-02-02 12:51:39 +02:00
|
|
|
}
|
2016-03-20 16:23:41 +02:00
|
|
|
|
|
|
|
if (!data->timeout)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
timeout = data->timeout;
|
|
|
|
/*
|
|
|
|
* Do not update last rx all the time to avoid cache bouncing
|
|
|
|
* between the rx queues.
|
|
|
|
* Update it every timeout. Worst case is the session will
|
|
|
|
* expire after ~ 2 * timeout, which doesn't matter that much.
|
|
|
|
*/
|
|
|
|
if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
|
|
|
|
/* Update is atomic */
|
|
|
|
data->last_rx = now;
|
|
|
|
|
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
2017-12-19 11:09:41 +02:00
|
|
|
/* Reverse the byte order of a MAC address in place. */
static void iwl_mvm_flip_address(u8 *addr)
{
	u8 reversed[ETH_ALEN];
	int idx = 0;

	while (idx < ETH_ALEN) {
		reversed[idx] = addr[ETH_ALEN - 1 - idx];
		idx++;
	}

	ether_addr_copy(addr, reversed);
}
|
|
|
|
|
2018-07-02 14:35:26 +02:00
|
|
|
/* Collected PHY-level metadata for one received PPDU/MPDU. */
struct iwl_mvm_rx_phy_data {
	/* how to interpret the d0..d5 words below */
	enum iwl_rx_phy_info_type info_type;
	/* raw PHY data words from the RX descriptor / no-data notification */
	__le32 d0, d1, d2, d3, eht_d4, d5;
	__le16 d4;
	/* true when this PHY data accompanies an actual data frame */
	bool with_data;
	bool first_subframe;
	/* raw RX vector words — presumably from no-data (sounding) notifications; verify against callers */
	__le32 rx_vec[4];

	/* values copied out of the MPDU descriptor for radiotap building */
	u32 rate_n_flags;
	u32 gp2_on_air_rise;
	u16 phy_info;
	/* per-antenna RSSI energy values */
	u8 energy_a, energy_b;
	u8 channel;
};
|
|
|
|
|
|
|
|
/*
 * Fill the radiotap HE-MU extension fields (per-channel RU allocations and
 * center-26-tone-RU bits) from the firmware PHY data words, but only for
 * channels whose SIG-B CRC passed.
 */
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
				     struct iwl_mvm_rx_phy_data *phy_data,
				     struct ieee80211_radiotap_he_mu *he_mu)
{
	u32 phy_data2 = le32_to_cpu(phy_data->d2);
	u32 phy_data3 = le32_to_cpu(phy_data->d3);
	u16 phy_data4 = le16_to_cpu(phy_data->d4);
	u32 rate_n_flags = phy_data->rate_n_flags;

	/* channel 1 info is only valid if its CRC was OK */
	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);

		he_mu->flags1 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);

		he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
					     phy_data2);
		he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
					     phy_data3);
		he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
					     phy_data2);
		he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
					     phy_data3);
	}

	/* channel 2 only exists for bandwidths above 20 MHz */
	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
	    (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);

		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);

		he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
					     phy_data2);
		he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
					     phy_data3);
		he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
					     phy_data2);
		he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
					     phy_data3);
	}
}
|
|
|
|
|
2018-06-14 14:48:27 +02:00
|
|
|
/*
 * Decode the HE RU allocation from the PHY data into the mac80211 rx_status
 * (bandwidth / RU size) and the radiotap HE / HE-MU fields (RU offset,
 * primary/secondary 80 MHz, SIG-A bandwidth).
 */
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
			       struct ieee80211_radiotap_he *he,
			       struct ieee80211_radiotap_he_mu *he_mu,
			       struct ieee80211_rx_status *rx_status)
{
	/*
	 * Unfortunately, we have to leave the mac80211 data
	 * incorrect for the case that we receive an HE-MU
	 * transmission and *don't* have the HE phy data (due
	 * to the bits being used for TSF). This shouldn't
	 * happen though as management frames where we need
	 * the TSF/timers are not be transmitted in HE-MU.
	 */
	u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
	u8 offs = 0;

	rx_status->bw = RATE_INFO_BW_HE_RU;

	he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);

	/* map the RU index to RU size + offset within that size class */
	switch (ru) {
	case 0 ... 36:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}
	he->data2 |= le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
	he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
				 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
	if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
		he->data2 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);

/* compile-time check: radiotap BW encodings match the rate_n_flags ones */
#define CHECK_BW(bw) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
	CHECK_BW(20);
	CHECK_BW(40);
	CHECK_BW(80);
	CHECK_BW(160);

	if (he_mu)
		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
	else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
		he->data6 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
|
|
|
|
|
2018-06-14 14:36:22 +02:00
|
|
|
/*
 * iwl_mvm_decode_he_phy_data - translate firmware HE PHY metadata into
 * radiotap HE / HE-MU fields.
 *
 * @mvm: driver context (used only for the MU-EXT sub-decoder call)
 * @phy_data: firmware-provided PHY data words (d0/d1/d2/d4) and info type
 * @he: radiotap HE element to fill (always valid here)
 * @he_mu: radiotap HE-MU element; NOTE(review): only dereferenced on the
 *         HE_MU/HE_MU_EXT paths — presumably the caller allocates it for
 *         those info types; confirm against iwl_mvm_rx_he()
 * @rx_status: mac80211 RX status, updated by the RU-allocation decoder
 * @queue: RX queue index (currently unused in this function body)
 *
 * Bails out early for non-HE info types (including all EHT types, which
 * are handled by iwl_mvm_decode_eht_phy_data()).
 */
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
				       struct iwl_mvm_rx_phy_data *phy_data,
				       struct ieee80211_radiotap_he *he,
				       struct ieee80211_radiotap_he_mu *he_mu,
				       struct ieee80211_rx_status *rx_status,
				       int queue)
{
	/* First switch: fields common to all HE PPDU formats */
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_NONE:
	case IWL_RX_PHY_INFO_TYPE_CCK:
	case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
	case IWL_RX_PHY_INFO_TYPE_HT:
	case IWL_RX_PHY_INFO_TYPE_VHT_SU:
	case IWL_RX_PHY_INFO_TYPE_VHT_MU:
	case IWL_RX_PHY_INFO_TYPE_EHT_MU:
	case IWL_RX_PHY_INFO_TYPE_EHT_TB:
	case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
		/* not an HE frame - nothing to decode here */
		return;
	case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
		/* TB-EXT carries the four spatial-reuse subfields in d2 */
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
							    IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
					      IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_HE_TB:
		/* HE common */
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
		he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
					 IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
					      IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
		/* UL/DL flag is not meaningful for trigger-based PPDUs */
		if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
		    phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
			he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
			he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
								    IWL_RX_PHY_DATA0_HE_UPLINK),
						      IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
		}
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
					      IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
					      IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
					      IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
		he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1,
							    IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
					      IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
		he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
					      IEEE80211_RADIOTAP_HE_DATA6_TXOP);
		he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_DOPPLER),
					      IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
		break;
	}

	/* Second switch: SU/MU spatial-reuse field (single subfield in d0) */
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
		he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
					      IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
		break;
	default:
		/* nothing here */
		break;
	}

	/* Third switch: format-specific decoding (MU SIG-B, RU allocation,
	 * SU beam change); cases deliberately fall through from the most
	 * specific format to the shared RU-allocation decoding.
	 */
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
		he_mu->flags1 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
		he_mu->flags1 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
		he_mu->flags2 |=
			le16_encode_bits(le16_get_bits(phy_data->d4,
						       IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
		iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_MU:
		he_mu->flags2 |=
			le16_encode_bits(le32_get_bits(phy_data->d1,
						       IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
		he_mu->flags2 |=
			le16_encode_bits(le32_get_bits(phy_data->d1,
						       IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
		fallthrough;
	case IWL_RX_PHY_INFO_TYPE_HE_TB:
	case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
		iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
		break;
	case IWL_RX_PHY_INFO_TYPE_HE_SU:
		he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
		he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
							    IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
					      IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
		break;
	default:
		/* nothing */
		break;
	}
}
|
|
|
|
|
2023-03-05 14:16:21 +02:00
|
|
|
/* Decode a bitfield out of a little-endian 32-bit word and re-encode the
 * same value at a different bit position of another le32 word.
 */
#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
	le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)

/* Extract dec_bits from in_value, store them at enc_bits in the radiotap
 * U-SIG element's value, and mark the same bits as known in its mask.
 * Arguments are copied into locals so enc_bits/usig are evaluated once.
 */
#define IWL_MVM_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
	typeof(enc_bits) _enc_bits = enc_bits; \
	typeof(usig) _usig = usig; \
	(_usig)->mask |= cpu_to_le32(_enc_bits); \
	(_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
} while (0)
|
|
|
|
|
2023-03-05 14:16:33 +02:00
|
|
|
/* Copy one content-channel RU allocation field from a firmware PHY data
 * word into the matching radiotap EHT data word, and set the matching
 * "known" bit. Relies on token pasting: expects local __le32 variables
 * named data<fw_data> (data2..data4) and a struct ieee80211_radiotap_eht
 * pointer named "eht" to be in scope at the expansion site.
 */
#define __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
	eht->data[(rt_data)] |= \
		(cpu_to_le32 \
		 (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
		 LE32_DEC_ENC(data ## fw_data, \
			      IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
			      IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))

/* Extra expansion level so the *_RU_DATA_* helper defines below are
 * expanded to their numeric values before ## pasting happens.
 */
#define _IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
	__IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)

/* Which radiotap EHT data[] word holds each content-channel RU field
 * (indexed as <content channel>_<80MHz segment>_<slot>).
 */
#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1
#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2
#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2
#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2
#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3
#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3
#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3
#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4

/* Which firmware PHY data word (dN) carries each hardware RU label */
#define IWL_RX_RU_DATA_A1 2
#define IWL_RX_RU_DATA_A2 2
#define IWL_RX_RU_DATA_B1 2
#define IWL_RX_RU_DATA_B2 4
#define IWL_RX_RU_DATA_C1 3
#define IWL_RX_RU_DATA_C2 3
#define IWL_RX_RU_DATA_D1 4
#define IWL_RX_RU_DATA_D2 4

/* Front-end macro: map a radiotap RU slot name and a firmware RU label
 * to the data-word helpers above.
 */
#define IWL_MVM_ENC_EHT_RU(rt_ru, fw_ru) \
	_IWL_MVM_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \
			    rt_ru, \
			    IWL_RX_RU_DATA_ ## fw_ru, \
			    fw_ru)
|
|
|
|
|
2023-03-05 14:16:25 +02:00
|
|
|
/*
 * iwl_mvm_decode_eht_ext_mu - fill radiotap EHT/U-SIG fields for an
 * EHT MU PPDU.
 *
 * Two sources exist for the U-SIG contents:
 *  - with_data: a real MPDU was received, fields come from the parsed
 *    firmware PHY data words (d1..d5); additionally fills per-user info
 *    and the per-content-channel RU allocations.
 *  - !with_data: a no-data notification; the raw U-SIG-A words are taken
 *    from rx_vec[0]/rx_vec[1] instead.
 *
 * @rx_status is unused in this function body (kept for a uniform decoder
 * signature with the TB variant).
 */
static void iwl_mvm_decode_eht_ext_mu(struct iwl_mvm *mvm,
				      struct iwl_mvm_rx_phy_data *phy_data,
				      struct ieee80211_rx_status *rx_status,
				      struct ieee80211_radiotap_eht *eht,
				      struct ieee80211_radiotap_eht_usig *usig)
{
	if (phy_data->with_data) {
		/* data2/data3/data4 look unused but are referenced by
		 * IWL_MVM_ENC_EHT_RU() through token pasting below.
		 */
		__le32 data1 = phy_data->d1;
		__le32 data2 = phy_data->d2;
		__le32 data3 = phy_data->d3;
		__le32 data4 = phy_data->eht_d4;
		__le32 data5 = phy_data->d5;
		u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data4,
					    IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
		IWL_MVM_ENC_USIG_VALUE_MASK
			(usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
			 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);

		/* per-user info: station ID of the addressed user */
		eht->user_info[0] |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
			LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
				     IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);

		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
		eht->data[7] |= LE32_DEC_ENC
			(data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
			 IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);

		/*
		 * Hardware labels the content channels/RU allocation values
		 * as follows:
		 *           Content Channel 1		Content Channel 2
		 *   20 MHz: A1
		 *   40 MHz: A1				B1
		 *   80 MHz: A1 C1			B1 D1
		 *  160 MHz: A1 C1 A2 C2		B1 D1 B2 D2
		 *  320 MHz: A1 C1 A2 C2 A3 C3 A4 C4	B1 D1 B2 D2 B3 D3 B4 D4
		 *
		 * However firmware can only give us A1-D2, so the higher
		 * frequencies are missing.
		 */

		/* cases fall through: each wider bandwidth also reports
		 * everything the narrower ones do
		 */
		switch (phy_bw) {
		case RATE_MCS_CHAN_WIDTH_320:
			/* additional values are missing in RX metadata */
		case RATE_MCS_CHAN_WIDTH_160:
			/* content channel 1 */
			IWL_MVM_ENC_EHT_RU(1_2_1, A2);
			IWL_MVM_ENC_EHT_RU(1_2_2, C2);
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_2_1, B2);
			IWL_MVM_ENC_EHT_RU(2_2_2, D2);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_80:
			/* content channel 1 */
			IWL_MVM_ENC_EHT_RU(1_1_2, C1);
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_1_2, D1);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_40:
			/* content channel 2 */
			IWL_MVM_ENC_EHT_RU(2_1_1, B1);
			fallthrough;
		case RATE_MCS_CHAN_WIDTH_20:
			IWL_MVM_ENC_EHT_RU(1_1_1, A1);
			break;
		}
	} else {
		/* no-data notification: raw U-SIG-A words from the RX vector */
		__le32 usig_a1 = phy_data->rx_vec[0];
		__le32 usig_a2 = phy_data->rx_vec[1];

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_VALIDATE,
					    IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PPDU_TYPE,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_SIG_MCS,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
		IWL_MVM_ENC_USIG_VALUE_MASK
			(usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
			 IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_CRC_OK,
					    IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
	}
}
|
|
|
|
|
|
|
|
/*
 * iwl_mvm_decode_eht_ext_tb - fill radiotap U-SIG fields for an EHT
 * trigger-based (TB) PPDU.
 *
 * Same source split as the MU variant: with_data uses the parsed firmware
 * PHY data word d5, otherwise the raw U-SIG-A words come from
 * rx_vec[0]/rx_vec[1].
 *
 * @mvm, @rx_status and @eht are unused in this function body (kept so the
 * TB/MU decoders share one signature).
 */
static void iwl_mvm_decode_eht_ext_tb(struct iwl_mvm *mvm,
				      struct iwl_mvm_rx_phy_data *phy_data,
				      struct ieee80211_rx_status *rx_status,
				      struct ieee80211_radiotap_eht *eht,
				      struct ieee80211_radiotap_eht_usig *usig)
{
	if (phy_data->with_data) {
		__le32 data5 = phy_data->d5;

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
					    IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
	} else {
		/* no-data notification: raw U-SIG-A words from the RX vector */
		__le32 usig_a1 = phy_data->rx_vec[0];
		__le32 usig_a2 = phy_data->rx_vec[1];

		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
					    IWL_RX_USIG_A1_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_PPDU_TYPE,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
		IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
					    IWL_RX_USIG_A2_EHT_CRC_OK,
					    IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
	}
}
|
|
|
|
|
2023-03-05 14:16:24 +02:00
|
|
|
static void iwl_mvm_decode_eht_ru(struct iwl_mvm *mvm,
|
|
|
|
struct ieee80211_rx_status *rx_status,
|
|
|
|
struct ieee80211_radiotap_eht *eht)
|
|
|
|
{
|
|
|
|
u32 ru = le32_get_bits(eht->data[8],
|
|
|
|
IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
|
|
|
|
enum nl80211_eht_ru_alloc nl_ru;
|
|
|
|
|
|
|
|
/* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
|
|
|
|
* in an EHT variant User Info field
|
|
|
|
*/
|
|
|
|
|
|
|
|
switch (ru) {
|
|
|
|
case 0 ... 36:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
|
|
|
|
break;
|
|
|
|
case 37 ... 52:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
|
|
|
|
break;
|
|
|
|
case 53 ... 60:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
|
|
|
|
break;
|
|
|
|
case 61 ... 64:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
|
|
|
|
break;
|
|
|
|
case 65 ... 66:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
|
|
|
|
break;
|
|
|
|
case 67:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
|
|
|
|
break;
|
|
|
|
case 68:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
|
|
|
|
break;
|
|
|
|
case 69:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
|
|
|
|
break;
|
|
|
|
case 70 ... 81:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
|
|
|
|
break;
|
|
|
|
case 82 ... 89:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
|
|
|
|
break;
|
|
|
|
case 90 ... 93:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
|
|
|
|
break;
|
|
|
|
case 94 ... 95:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
|
|
|
|
break;
|
|
|
|
case 96 ... 99:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
|
|
|
|
break;
|
|
|
|
case 100 ... 103:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
|
|
|
|
break;
|
|
|
|
case 104:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
|
|
|
|
break;
|
|
|
|
case 105 ... 106:
|
|
|
|
nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
rx_status->bw = RATE_INFO_BW_EHT_RU;
|
|
|
|
rx_status->eht.ru = nl_ru;
|
|
|
|
}
|
|
|
|
|
2023-03-05 14:16:21 +02:00
|
|
|
/*
 * iwl_mvm_decode_eht_phy_data - decode firmware EHT PHY metadata into the
 * radiotap EHT and U-SIG elements.
 *
 * Fills the format-independent EHT/U-SIG fields (UL/DL, BSS color, TXOP,
 * spatial reuse, RU allocation, LDPC/padding/disambiguity, PHY version,
 * CRC status), then dispatches to the TB or MU sub-decoder based on the
 * firmware info type. Non-EHT info types return immediately.
 *
 * with_data selects between the parsed PHY data words (d0/d1) and the raw
 * U-SIG-A word from rx_vec[0] as the source for several fields.
 */
static void iwl_mvm_decode_eht_phy_data(struct iwl_mvm *mvm,
					struct iwl_mvm_rx_phy_data *phy_data,
					struct ieee80211_rx_status *rx_status,
					struct ieee80211_radiotap_eht *eht,
					struct ieee80211_radiotap_eht_usig *usig)

{
	__le32 data0 = phy_data->d0;
	__le32 data1 = phy_data->d1;
	__le32 usig_a1 = phy_data->rx_vec[0];
	u8 info_type = phy_data->info_type;

	/* Not in EHT range */
	if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
	    info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
		return;

	usig->common |= cpu_to_le32
		(IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
		 IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
	if (phy_data->with_data) {
		usig->common |= LE32_DEC_ENC(data0,
					     IWL_RX_PHY_DATA0_EHT_UPLINK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
		usig->common |= LE32_DEC_ENC(data0,
					     IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
	} else {
		usig->common |= LE32_DEC_ENC(usig_a1,
					     IWL_RX_USIG_A1_UL_FLAG,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
		usig->common |= LE32_DEC_ENC(usig_a1,
					     IWL_RX_USIG_A1_BSS_COLOR,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
	}

	/* validate-bits status only when the firmware advertises support */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT)) {
		usig->common |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
		usig->common |=
			LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
	}

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
	eht->data[0] |= LE32_DEC_ENC(data0,
				     IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
				     IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);

	/* All RU allocating size/index is in TB format */
	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
	eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
	eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
	eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
				     IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);

	/* must run after data[8] is filled: decodes rx_status from it */
	iwl_mvm_decode_eht_ru(mvm, rx_status, eht);

	/* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
	 * which is on only in case of monitor mode so no need to check monitor
	 * mode
	 */
	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
	eht->data[1] |=
		le32_encode_bits(mvm->monitor_p80,
				 IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);

	usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
	if (phy_data->with_data)
		usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
	else
		usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
					     IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
				     IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
				     IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
	eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
				     IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);

	/* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */

	if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
		usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);

	usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
	usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
				     IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);

	/*
	 * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
	 * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
	 */

	eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
	eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
				     IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);

	/* format-specific decoding: TB or MU (with or without extension) */
	if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
	    info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
		iwl_mvm_decode_eht_ext_tb(mvm, phy_data, rx_status, eht, usig);

	if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
	    info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
		iwl_mvm_decode_eht_ext_mu(mvm, phy_data, rx_status, eht, usig);
}
|
|
|
|
|
2023-03-05 14:16:20 +02:00
|
|
|
/*
 * iwl_mvm_rx_eht - build the radiotap EHT and U-SIG TLVs for a received
 * EHT frame (or no-data notification).
 *
 * Allocates the EHT TLV (one extra u32 of user_info when a real MPDU was
 * received), fills bandwidth/GI/LTF from rate_n_flags, reports AMPDU EOF
 * details on queue 0, and delegates the PHY-data decoding to
 * iwl_mvm_decode_eht_phy_data() when firmware provided it (TSF overload).
 *
 * @queue: RX queue index; monitor/aggregation updates happen on queue 0 only.
 */
static void iwl_mvm_rx_eht(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct iwl_mvm_rx_phy_data *phy_data,
			   int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);

	struct ieee80211_radiotap_eht *eht;
	struct ieee80211_radiotap_eht_usig *usig;
	size_t eht_len = sizeof(*eht);

	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	/* EHT and HE have the same values for LTF */
	u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
	u16 phy_info = phy_data->phy_info;
	u32 bw;

	/* u32 for 1 user_info */
	if (phy_data->with_data)
		eht_len += sizeof(u32);

	eht = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);

	usig = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
					sizeof(*usig));
	rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
	usig->common |=
		cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);

	/* specific handling for 320MHz */
	bw = FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags);
	if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
		/* the 320MHz slot bit distinguishes 320MHz-1 / 320MHz-2 */
		bw += FIELD_GET(IWL_RX_PHY_DATA0_EHT_BW320_SLOT,
				le32_to_cpu(phy_data->d0));

	usig->common |= cpu_to_le32
		(FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));

	/* report the AMPDU-EOF bit on single frames */
	if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
	    (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* PHY data words are only valid when TSF was overloaded by them */
	if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		iwl_mvm_decode_eht_phy_data(mvm, phy_data, rx_status, eht, usig);

/* compile-time proof that HW rate bits match the radiotap HE format codes */
#define CHECK_TYPE(F) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))

	CHECK_TYPE(SU);
	CHECK_TYPE(EXT_SU);
	CHECK_TYPE(MU);
	CHECK_TYPE(TRIG);

	/* map the HW GI/LTF code to nl80211 GI and radiotap LTF size;
	 * trigger-based PPDUs use a different mapping for codes 0 and 2
	 */
	switch (FIELD_GET(RATE_MCS_HE_GI_LTF_MSK, rate_n_flags)) {
	case 0:
		if (he_type == RATE_MCS_HE_TYPE_TRIG) {
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
		} else {
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		}
		break;
	case 1:
		rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		break;
	case 2:
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
		else
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
		break;
	case 3:
		/* code 3 for trigger-based frames leaves ltf/gi unknown */
		if (he_type != RATE_MCS_HE_TYPE_TRIG) {
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
			rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
		}
		break;
	default:
		/* nothing here */
		break;
	}

	if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
		eht->data[0] |= cpu_to_le32
			(FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
				    ltf) |
			 FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
				    rx_status->eht.gi));
	}

	if (!phy_data->with_data) {
		/* no-data notification: NSS/beamforming from the RX vector */
		eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
					  IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
		eht->data[7] |=
			le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
						       RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
					 IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
		if (rate_n_flags & RATE_MCS_BF_MSK)
			eht->data[7] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
	} else {
		/* real MPDU: per-user MCS/coding/NSS from rate_n_flags */
		eht->user_info[0] |=
			cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
				    IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);

		if (rate_n_flags & RATE_MCS_BF_MSK)
			eht->user_info[0] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);

		if (rate_n_flags & RATE_MCS_LDPC_MSK)
			eht->user_info[0] |=
				cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);

		eht->user_info[0] |= cpu_to_le32
			(FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
				    FIELD_GET(RATE_VHT_MCS_RATE_CODE_MSK,
					      rate_n_flags)) |
			 FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
				    FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags)));
	}
}
|
|
|
|
|
2018-04-09 11:20:09 +03:00
|
|
|
/*
 * Fill HE (802.11ax) radiotap headers into the skb and set the HE-related
 * fields of the RX status (GI, LTF size, RU allocation, TXBF).
 *
 * Appends a struct ieee80211_radiotap_he (and, for MU PPDUs, a
 * struct ieee80211_radiotap_he_mu) to the skb via skb_put_data(), then
 * fills them from @phy_data and @rate_n_flags.  Must run before any
 * other radiotap data is appended for this frame.
 */
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
			  struct iwl_mvm_rx_phy_data *phy_data,
			  int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_he *he = NULL;
	struct ieee80211_radiotap_he_mu *he_mu = NULL;
	u32 rate_n_flags = phy_data->rate_n_flags;
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
	u8 ltf;
	/* template with the "known" bits we always fill in below */
	static const struct ieee80211_radiotap_he known = {
		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
				     IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
	};
	/* template for the MU extension, used only for HE-MU PPDUs */
	static const struct ieee80211_radiotap_he_mu mu_known = {
		.flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
		.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
				      IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
	};
	u16 phy_info = phy_data->phy_info;

	he = skb_put_data(skb, &known, sizeof(known));
	rx_status->flag |= RX_FLAG_RADIOTAP_HE;

	if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
	    phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
		he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
		rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
	}

	/* report the AMPDU-EOF bit on single frames */
	if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* TSF overload means the TSF words carry extra PHY data instead */
	if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
					   queue);

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
	    (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
		rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
		if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
	}

	/* HE ext-SU on the 106-tone RU is reported as an RU bandwidth */
	if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
	    rate_n_flags & RATE_MCS_HE_106T_MSK) {
		rx_status->bw = RATE_INFO_BW_HE_RU;
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
	}

	/* actually data is filled in mac80211 */
	if (he_type == RATE_MCS_HE_TYPE_SU ||
	    he_type == RATE_MCS_HE_TYPE_EXT_SU)
		he->data1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);

/*
 * Compile-time check that the radiotap HE format codes match the
 * firmware's rate_n_flags HE type encoding, so the shift below is valid.
 */
#define CHECK_TYPE(F)						\
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))

	CHECK_TYPE(SU);
	CHECK_TYPE(EXT_SU);
	CHECK_TYPE(MU);
	CHECK_TYPE(TRIG);

	he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);

	if (rate_n_flags & RATE_MCS_BF_MSK)
		he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);

	/* map the firmware GI/LTF code to nl80211 GI and radiotap LTF size */
	switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
		RATE_MCS_HE_GI_LTF_POS) {
	case 0:
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		if (he_type == RATE_MCS_HE_TYPE_MU)
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		else
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
		break;
	case 1:
		if (he_type == RATE_MCS_HE_TYPE_TRIG)
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		break;
	case 2:
		if (he_type == RATE_MCS_HE_TYPE_TRIG) {
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		} else {
			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
		}
		break;
	case 3:
		rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		break;
	case 4:
		rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
		break;
	default:
		ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
	}

	he->data5 |= le16_encode_bits(ltf,
				      IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
}
|
|
|
|
|
2018-07-02 17:16:48 +02:00
|
|
|
static void iwl_mvm_decode_lsig(struct sk_buff *skb,
|
|
|
|
struct iwl_mvm_rx_phy_data *phy_data)
|
|
|
|
{
|
|
|
|
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
|
|
|
|
struct ieee80211_radiotap_lsig *lsig;
|
|
|
|
|
|
|
|
switch (phy_data->info_type) {
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HT:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_VHT_SU:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_VHT_MU:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HE_SU:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HE_MU:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_HE_TB:
|
2023-03-05 14:16:15 +02:00
|
|
|
case IWL_RX_PHY_INFO_TYPE_EHT_MU:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_EHT_TB:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
|
|
|
|
case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
|
2018-07-02 17:16:48 +02:00
|
|
|
lsig = skb_put(skb, sizeof(*lsig));
|
|
|
|
lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
|
|
|
|
lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1,
|
|
|
|
IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
|
|
|
|
IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
|
|
|
|
rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-05 11:06:42 +02:00
|
|
|
/*
 * Accumulator passed to iwl_mvm_rx_get_sta_block_tx() when iterating
 * stations during a channel-switch TX-unblock check.
 */
struct iwl_rx_sta_csa {
	/* cleared by the iterator if any station on @vif still has TX disabled */
	bool all_sta_unblocked;
	/* the vif whose stations are being examined */
	struct ieee80211_vif *vif;
};
|
|
|
|
|
|
|
|
static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
|
|
|
|
{
|
|
|
|
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
|
|
|
struct iwl_rx_sta_csa *rx_sta_csa = data;
|
|
|
|
|
|
|
|
if (mvmsta->vif != rx_sta_csa->vif)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (mvmsta->disable_tx)
|
|
|
|
rx_sta_csa->all_sta_unblocked = false;
|
|
|
|
}
|
|
|
|
|
2022-09-06 16:42:07 +03:00
|
|
|
/*
|
|
|
|
* Note: requires also rx_status->band to be prefilled, as well
|
|
|
|
* as phy_data (apart from phy_data->info_type)
|
|
|
|
*/
|
|
|
|
static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
				   struct sk_buff *skb,
				   struct iwl_mvm_rx_phy_data *phy_data,
				   int queue)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	u32 rate_n_flags = phy_data->rate_n_flags;
	u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
	bool is_sgi;

	phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;

	/* with TSF overload, phy_data->d1 carries the real info type */
	if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
		phy_data->info_type =
			le32_get_bits(phy_data->d1,
				      IWL_RX_PHY_DATA1_INFO_TYPE_MASK);

	/* This may be overridden by iwl_mvm_rx_he() to HE_RU */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->bw = RATE_INFO_BW_160;
		break;
	case RATE_MCS_CHAN_WIDTH_320:
		rx_status->bw = RATE_INFO_BW_320;
		break;
	}

	/* must be before L-SIG data */
	if (format == RATE_MCS_HE_MSK)
		iwl_mvm_rx_he(mvm, skb, phy_data, queue);

	iwl_mvm_decode_lsig(skb, phy_data);

	rx_status->device_timestamp = phy_data->gp2_on_air_rise;
	rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
							 rx_status->band);
	iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
				    phy_data->energy_a, phy_data->energy_b);

	/* using TLV format and must be after all fixed len fields */
	if (format == RATE_MCS_EHT_MSK)
		iwl_mvm_rx_eht(mvm, skb, phy_data, queue);

	if (unlikely(mvm->monitor_on))
		iwl_mvm_add_rtap_sniffer_config(mvm, skb);

	/* HE encodes short GI differently from HT/VHT */
	is_sgi = format == RATE_MCS_HE_MSK ?
		iwl_he_is_sgi(rate_n_flags) :
		rate_n_flags & RATE_MCS_SGI_MSK;

	if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;

	/* encoding type first (HT is set in the rate switch below) */
	switch (format) {
	case RATE_MCS_VHT_MSK:
		rx_status->encoding = RX_ENC_VHT;
		break;
	case RATE_MCS_HE_MSK:
		rx_status->encoding = RX_ENC_HE;
		rx_status->he_dcm =
			!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
		break;
	case RATE_MCS_EHT_MSK:
		rx_status->encoding = RX_ENC_EHT;
		break;
	}

	/* rate index / NSS / STBC per modulation family */
	switch (format) {
	case RATE_MCS_HT_MSK:
		rx_status->encoding = RX_ENC_HT;
		rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		break;
	case RATE_MCS_VHT_MSK:
	case RATE_MCS_HE_MSK:
	case RATE_MCS_EHT_MSK:
		rx_status->nss =
			u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		break;
	default: {
		/* legacy (CCK/OFDM): translate HW rate index to mac80211 index */
		int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
								 rx_status->band);

		rx_status->rate_idx = rate;

		/* firmware gave us a rate we can't map - report rate 0 */
		if ((rate < 0 || rate > 0xFF)) {
			rx_status->rate_idx = 0;
			if (net_ratelimit())
				IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
					rate_n_flags, rx_status->band);
		}

		break;
	}
	}
}
|
|
|
|
|
2015-09-03 14:56:10 +02:00
|
|
|
/*
 * Main multi-queue RX handler: parse one MPDU descriptor from the
 * firmware, build an skb with radiotap/RX-status info, run crypto/PN,
 * duplicate and reorder checks, and hand the frame to mac80211.
 *
 * The skb is either consumed by the pass-to-mac80211 path or freed
 * here on any drop; all station lookups happen under rcu_read_lock().
 */
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 len;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
	struct ieee80211_sta *sta = NULL;
	struct ieee80211_link_sta *link_sta = NULL;
	struct sk_buff *skb;
	u8 crypt_len = 0;
	u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
	size_t desc_size;
	struct iwl_mvm_rx_phy_data phy_data = {};
	u32 format;

	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return;

	/* descriptor layout (and size) differs before/after AX210 */
	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		desc_size = sizeof(*desc);
	else
		desc_size = IWL_RX_DESC_SIZE_V1;

	if (unlikely(pkt_len < desc_size)) {
		IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n");
		return;
	}

	/* extract PHY data from the version-specific descriptor layout */
	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
		phy_data.channel = desc->v3.channel;
		phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
		phy_data.energy_a = desc->v3.energy_a;
		phy_data.energy_b = desc->v3.energy_b;

		phy_data.d0 = desc->v3.phy_data0;
		phy_data.d1 = desc->v3.phy_data1;
		phy_data.d2 = desc->v3.phy_data2;
		phy_data.d3 = desc->v3.phy_data3;
		phy_data.eht_d4 = desc->phy_eht_data4;
		phy_data.d5 = desc->v3.phy_data5;
	} else {
		phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
		phy_data.channel = desc->v1.channel;
		phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
		phy_data.energy_a = desc->v1.energy_a;
		phy_data.energy_b = desc->v1.energy_b;

		phy_data.d0 = desc->v1.phy_data0;
		phy_data.d1 = desc->v1.phy_data1;
		phy_data.d2 = desc->v1.phy_data2;
		phy_data.d3 = desc->v1.phy_data3;
	}

	/* old firmware notification versions use the v1 rate format */
	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
				    REPLY_RX_MPDU_CMD, 0) < 4) {
		phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
		IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
			       phy_data.rate_n_flags);
	}

	format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	len = le16_to_cpu(desc->mpdu_len);

	/* don't trust the firmware-reported MPDU length beyond the payload */
	if (unlikely(len + desc_size > pkt_len)) {
		IWL_DEBUG_DROP(mvm, "FW lied about packet len\n");
		return;
	}

	phy_data.with_data = true;
	phy_data.phy_info = le16_to_cpu(desc->phy_info);
	phy_data.d4 = desc->phy_data4;

	hdr = (void *)(pkt->data + desc_size);
	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		/*
		 * If the device inserted padding it means that (it thought)
		 * the 802.11 header wasn't a multiple of 4 bytes long. In
		 * this case, reserve two bytes at the start of the SKB to
		 * align the payload properly in case we end up copying it.
		 */
		skb_reserve(skb, 2);
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
	    !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
			     le32_to_cpu(desc->status));
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	/* set the preamble flag if appropriate */
	if (format == RATE_MCS_CCK_MSK &&
	    phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;

	/* TSF fields are only valid when not overloaded with PHY data */
	if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
		u64 tsf_on_air_rise;

		if (mvm->trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_AX210)
			tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
		else
			tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);

		rx_status->mactime = tsf_on_air_rise;
		/* TSF as indicated by the firmware is at INA time */
		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
	}

	if (iwl_mvm_is_band_in_rx_supported(mvm)) {
		u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);

		rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
	} else {
		/* fall back to deriving the band from the channel number */
		rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
			NL80211_BAND_2GHZ;
	}

	/* update aggregation data for monitor sake on default queue */
	if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
		bool toggle_bit;

		toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		/*
		 * Toggle is switched whenever new aggregation starts. Make
		 * sure ampdu_reference is never 0 so we can later use it to
		 * see if the frame was really part of an A-MPDU or not.
		 */
		if (toggle_bit != mvm->ampdu_toggle) {
			mvm->ampdu_ref++;
			if (mvm->ampdu_ref == 0)
				mvm->ampdu_ref++;
			mvm->ampdu_toggle = toggle_bit;
			phy_data.first_subframe = true;
		}
		rx_status->ampdu_reference = mvm->ampdu_ref;
	}

	rcu_read_lock();

	if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
		if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
			if (IS_ERR(sta))
				sta = NULL;
			link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);

			/* report the link for MLO stations */
			if (sta && sta->valid_links && link_sta) {
				rx_status->link_valid = 1;
				rx_status->link_id = link_sta->link_id;
			}
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/*
		 * This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
			      le32_to_cpu(pkt->len_n_flags), queue,
			      &crypt_len)) {
		kfree_skb(skb);
		goto out;
	}

	iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_vif *tx_blocked_vif =
			rcu_dereference(mvm->csa_tx_blocked_vif);
		u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
			       IWL_RX_MPDU_REORDER_BAID_MASK) >>
			       IWL_RX_MPDU_REORDER_BAID_SHIFT);
		struct iwl_fw_dbg_trigger_tlv *trig;
		struct ieee80211_vif *vif = mvmsta->vif;

		/* kick traffic-load monitoring work if it's due */
		if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
		    !is_multicast_ether_addr(hdr->addr1) &&
		    ieee80211_is_data(hdr->frame_control) &&
		    time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
			schedule_delayed_work(&mvm->tcm.work, 0);

		/*
		 * We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
			struct iwl_mvm_vif *mvmvif =
				iwl_mvm_vif_from_mac80211(tx_blocked_vif);
			struct iwl_rx_sta_csa rx_sta_csa = {
				.all_sta_unblocked = true,
				.vif = tx_blocked_vif,
			};

			if (mvmvif->csa_target_freq == rx_status->freq)
				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
								 false);
			ieee80211_iterate_stations_atomic(mvm->hw,
							  iwl_mvm_rx_get_sta_block_tx,
							  &rx_sta_csa);

			if (rx_sta_csa.all_sta_unblocked) {
				RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
				/* Unblock BCAST / MCAST station */
				iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
				cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
			}
		}

		rs_update_last_rssi(mvm, mvmsta, rx_status);

		/* fire the low-RSSI debug trigger on beacons, if configured */
		trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
					     ieee80211_vif_to_wdev(vif),
					     FW_DBG_TRIGGER_RSSI);

		if (trig && ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			s32 rssi;

			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			if (rx_status->signal < rssi)
				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
							NULL);
		}

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(mvm, sta, skb, pkt);

		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
			IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n",
				       le16_to_cpu(hdr->seq_ctrl));
			kfree_skb(skb);
			goto out;
		}

		/*
		 * Our hardware de-aggregates AMSDUs but copies the mac header
		 * as it to the de-aggregated MPDUs. We need to turn off the
		 * AMSDU bit in the QoS control ourselves.
		 * In addition, HW reverses addr3 and addr4 - reverse it back.
		 */
		if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
			u8 *qc = ieee80211_get_qos_ctl(hdr);

			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			if (mvm->trans->trans_cfg->device_family ==
			    IWL_DEVICE_FAMILY_9000) {
				iwl_mvm_flip_address(hdr->addr3);

				if (ieee80211_has_a4(hdr->frame_control))
					iwl_mvm_flip_address(hdr->addr4);
			}
		}
		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
			u32 reorder_data = le32_to_cpu(desc->reorder_data);

			iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
		}

		if (ieee80211_is_data(hdr->frame_control)) {
			u8 sub_frame_idx = desc->amsdu_info &
				IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

			/* 0 means not an A-MSDU, and 1 means a new A-MSDU */
			if (!sub_frame_idx || sub_frame_idx == 1)
				iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
						   queue);
		}
	}

	/* management stuff on default queue */
	if (!queue) {
		if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
			      ieee80211_is_probe_resp(hdr->frame_control)) &&
			     mvm->sched_scan_pass_all ==
			     SCHED_SCAN_PASS_ALL_ENABLED))
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

		if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
			     ieee80211_is_probe_resp(hdr->frame_control)))
			rx_status->boottime_ns = ktime_get_boottime_ns();
	}

	if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
		kfree_skb(skb);
		goto out;
	}

	/* reorder buffer / time-sync / MEI filters may consume the skb */
	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
	    likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
	    likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
		if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
		    (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
		    !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
			rx_status->flag |= RX_FLAG_AMSDU_MORE;

		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
	}
out:
	rcu_read_unlock();
}
|
2015-12-01 13:48:18 +02:00
|
|
|
|
2019-01-02 10:31:05 +01:00
|
|
|
/*
 * Handle a "no data" (zero-length PSDU) notification for monitor mode:
 * build a data-less skb carrying only radiotap/RX-status info and pass
 * it to mac80211.
 */
void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
				struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
	u32 rssi;
	u32 info_type;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	struct iwl_mvm_rx_phy_data phy_data;
	u32 format;

	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
		return;

	/* validate against the base (pre-v3) notification size first */
	if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(struct iwl_rx_no_data)))
		return;

	rssi = le32_to_cpu(desc->rssi);
	info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
	phy_data.d0 = desc->phy_info[0];
	phy_data.d1 = desc->phy_info[1];
	phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
	phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
	phy_data.rate_n_flags = le32_to_cpu(desc->rate);
	phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
	phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
	phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
	phy_data.with_data = false;
	phy_data.rx_vec[0] = desc->rx_vec[0];
	phy_data.rx_vec[1] = desc->rx_vec[1];

	/* notification version < 2 carries the old (v1) rate format */
	if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
				    RX_NO_DATA_NOTIF, 0) < 2) {
		IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
			       phy_data.rate_n_flags);
		phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
		IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
			       phy_data.rate_n_flags);
	}

	format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	/* v3 adds rx_vec[2..3], which EHT parsing needs */
	if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
				    RX_NO_DATA_NOTIF, 0) >= 3) {
		if (unlikely(iwl_rx_packet_payload_len(pkt) <
		    sizeof(struct iwl_rx_no_data_ver_3)))
			/* invalid len for ver 3 */
			return;
		phy_data.rx_vec[2] = desc->rx_vec[2];
		phy_data.rx_vec[3] = desc->rx_vec[3];
	} else {
		if (format == RATE_MCS_EHT_MSK)
			/* no support for EHT before version 3 API */
			return;
	}

	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/* 0-length PSDU */
	rx_status->flag |= RX_FLAG_NO_PSDU;

	/* map the firmware info type to a radiotap zero-length-PSDU type */
	switch (info_type) {
	case RX_NO_DATA_INFO_TYPE_NDP:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
		break;
	case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
	case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
		break;
	default:
		rx_status->zero_length_psdu_type =
			IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
		break;
	}

	rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
		NL80211_BAND_2GHZ;

	iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);

	/* no more radio tap info should be put after this point.
	 *
	 * We mark it as mac header, for upper layers to know where
	 * all radio tap header ends.
	 */
	skb_reset_mac_header(skb);

	/*
	 * Override the nss from the rx_vec since the rate_n_flags has
	 * only 2 bits for the nss which gives a max of 4 ss but there
	 * may be up to 8 spatial streams.
	 */
	switch (format) {
	case RATE_MCS_VHT_MSK:
		rx_status->nss =
			le32_get_bits(desc->rx_vec[0],
				      RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
		break;
	case RATE_MCS_HE_MSK:
		rx_status->nss =
			le32_get_bits(desc->rx_vec[0],
				      RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
		break;
	case RATE_MCS_EHT_MSK:
		rx_status->nss =
			le32_get_bits(desc->rx_vec[2],
				      RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
	}

	rcu_read_lock();
	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
	rcu_read_unlock();
}
|
2019-06-24 13:57:34 +03:00
|
|
|
|
2016-02-28 15:41:47 +02:00
|
|
|
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
|
2015-12-01 13:48:18 +02:00
|
|
|
struct iwl_rx_cmd_buffer *rxb, int queue)
|
|
|
|
{
|
2016-02-28 15:41:47 +02:00
|
|
|
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
|
|
|
struct iwl_frame_release *release = (void *)pkt->data;
|
|
|
|
|
2021-01-17 13:10:29 +02:00
|
|
|
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
|
|
|
|
return;
|
|
|
|
|
2019-06-24 13:57:34 +03:00
|
|
|
iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
|
2019-06-27 21:50:24 +03:00
|
|
|
le16_to_cpu(release->nssn),
|
2023-10-17 12:16:47 +03:00
|
|
|
queue);
|
2015-12-01 13:48:18 +02:00
|
|
|
}
|
2019-07-04 17:24:47 +02:00
|
|
|
|
|
|
|
void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|
|
|
struct iwl_rx_cmd_buffer *rxb, int queue)
|
|
|
|
{
|
|
|
|
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
|
|
|
struct iwl_bar_frame_release *release = (void *)pkt->data;
|
|
|
|
unsigned int baid = le32_get_bits(release->ba_info,
|
|
|
|
IWL_BAR_FRAME_RELEASE_BAID_MASK);
|
|
|
|
unsigned int nssn = le32_get_bits(release->ba_info,
|
|
|
|
IWL_BAR_FRAME_RELEASE_NSSN_MASK);
|
|
|
|
unsigned int sta_id = le32_get_bits(release->sta_tid,
|
|
|
|
IWL_BAR_FRAME_RELEASE_STA_MASK);
|
|
|
|
unsigned int tid = le32_get_bits(release->sta_tid,
|
|
|
|
IWL_BAR_FRAME_RELEASE_TID_MASK);
|
|
|
|
struct iwl_mvm_baid_data *baid_data;
|
|
|
|
|
2021-01-17 13:10:29 +02:00
|
|
|
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
|
|
|
|
return;
|
|
|
|
|
2019-07-04 17:24:47 +02:00
|
|
|
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
|
|
|
|
baid >= ARRAY_SIZE(mvm->baid_map)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
baid_data = rcu_dereference(mvm->baid_map[baid]);
|
|
|
|
if (!baid_data) {
|
|
|
|
IWL_DEBUG_RX(mvm,
|
|
|
|
"Got valid BAID %d but not allocated, invalid BAR release!\n",
|
|
|
|
baid);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2023-04-16 15:47:34 +03:00
|
|
|
if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX ||
|
|
|
|
!(baid_data->sta_mask & BIT(sta_id)),
|
|
|
|
"baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
|
|
|
|
baid, baid_data->sta_mask, baid_data->tid, sta_id,
|
2019-07-04 17:24:47 +02:00
|
|
|
tid))
|
|
|
|
goto out;
|
|
|
|
|
2023-09-13 14:56:51 +03:00
|
|
|
IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n",
|
|
|
|
nssn);
|
|
|
|
|
2023-10-17 12:16:47 +03:00
|
|
|
iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue);
|
2019-07-04 17:24:47 +02:00
|
|
|
out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|