wifi: ath12k: alloc REO queue per station

In MLO case, all link peers share the same REO queue, so the queue
should be allocated only once, currently this is done by checking
primary_link flag in ath12k_dp_rx_peer_tid_setup(). However, the
check not only avoids duplicate allocation, but also bypasses sending
queue configuration to firmware for non-primary links. In an upcoming
patch, changes will be added to make this check a no-op for WCN7850,
as WCN7850 firmware needs to be explicitly notified of each link peer's
queue configuration. That said, the duplicate allocation would arise
again after that change, hence it needs to be resolved beforehand.

Since all link peers share the same queue, it should be allocated per
MLD peer, not per link peer. So change to do the allocation once and
save it in the MLD peer, so that link peers can simply get the queue
configuration from there.

Also relocate ath12k_reoq_buf structure to core.h to avoid circular
dependency.

Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0.c5-00481-QCAHMTSWPL_V1.0_V2.0_SILICONZ-3
Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.1.c5-00284-QCAHMTSWPL_V1.0_V2.0_SILICONZ-1
Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.3.1-00209-QCAHKSWPL_SILICONZ-1

Signed-off-by: Baochen Qiang <quic_bqiang@quicinc.com>
Link: https://patch.msgid.link/20250409-ath12k-wcn7850-mlo-support-v2-7-3801132ca2c3@quicinc.com
Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
This commit is contained in:
Baochen Qiang 2025-04-09 10:26:40 +08:00 committed by Jeff Johnson
parent 3aba3a1422
commit 3b9cbce6fd
3 changed files with 65 additions and 44 deletions

View file

@ -535,6 +535,12 @@ struct ath12k_link_sta {
u8 link_idx; u8 link_idx;
}; };
/* Backing buffer for a hardware REO (reorder) queue descriptor.
 * Allocated once per MLD peer (per TID) and shared by all link peers.
 */
struct ath12k_reoq_buf {
void *vaddr; /* kzalloc'ed base address, not necessarily aligned */
dma_addr_t paddr_aligned; /* DMA address of the aligned descriptor */
u32 size; /* size in bytes of the mapped descriptor */
};
struct ath12k_sta { struct ath12k_sta {
struct ath12k_vif *ahvif; struct ath12k_vif *ahvif;
enum hal_pn_type pn_type; enum hal_pn_type pn_type;
@ -547,6 +553,8 @@ struct ath12k_sta {
u8 num_peer; u8 num_peer;
enum ieee80211_sta_state state; enum ieee80211_sta_state state;
struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
}; };
#define ATH12K_HALF_20MHZ_BW 10 #define ATH12K_HALF_20MHZ_BW 10

View file

@ -929,17 +929,66 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
return 0; return 0;
} }
/* Assign a REO queue buffer to @rx_tid for one TID of a link peer.
 *
 * The buffer is cached per MLD peer in @ahsta->reoq_bufs[tid]: it is
 * allocated, initialized and DMA-mapped only on the first call for a
 * given TID; subsequent link peers of the same MLD peer reuse it, which
 * avoids duplicate allocation when every link peer's queue configuration
 * is sent to firmware.
 *
 * NOTE(review): caller appears to hold ab->base_lock (GFP_ATOMIC is used
 * here) — confirm against ath12k_dp_rx_peer_tid_setup().
 *
 * Returns 0 on success or a negative error code (-ENOMEM on allocation
 * failure, or the dma_mapping_error() result on mapping failure).
 */
static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
struct ath12k_sta *ahsta,
struct ath12k_dp_rx_tid *rx_tid,
u16 ssn, enum hal_pn_type pn_type)
{
u32 ba_win_sz = rx_tid->ba_win_sz;
struct ath12k_reoq_buf *buf;
void *vaddr, *vaddr_aligned;
dma_addr_t paddr_aligned;
u8 tid = rx_tid->tid;
u32 hw_desc_sz;
int ret;
buf = &ahsta->reoq_bufs[tid];
/* First link peer for this TID: allocate and map the queue buffer */
if (!buf->vaddr) {
/* TODO: Optimize the memory allocation for qos tid based on
 * the actual BA window size in REO tid update path.
 */
if (tid == HAL_DESC_REO_NON_QOS_TID)
hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
else
hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
/* Over-allocate so the descriptor can be aligned to
 * HAL_LINK_DESC_ALIGN within the buffer.
 */
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr)
return -ENOMEM;
vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
/* Initialize the descriptor via CPU before handing it to the
 * device with dma_map_single() below.
 */
ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
ssn, pn_type);
paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(ab->dev, paddr_aligned);
if (ret) {
kfree(vaddr);
return ret;
}
/* Cache in the MLD peer; keep the unaligned vaddr for kfree() */
buf->vaddr = vaddr;
buf->paddr_aligned = paddr_aligned;
buf->size = hw_desc_sz;
}
/* Existing or freshly created buffer: copy config into this link
 * peer's rx_tid and mark the queue active.
 */
rx_tid->qbuf = *buf;
rx_tid->active = true;
return 0;
}
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id, int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn, u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type) enum hal_pn_type pn_type)
{ {
struct ath12k_base *ab = ar->ab; struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp; struct ath12k_dp *dp = &ab->dp;
struct hal_rx_reo_queue *addr_aligned;
struct ath12k_peer *peer; struct ath12k_peer *peer;
struct ath12k_sta *ahsta;
struct ath12k_dp_rx_tid *rx_tid; struct ath12k_dp_rx_tid *rx_tid;
u32 hw_desc_sz;
void *vaddr;
dma_addr_t paddr_aligned; dma_addr_t paddr_aligned;
int ret; int ret;
@ -972,9 +1021,9 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
} }
rx_tid = &peer->rx_tid[tid]; rx_tid = &peer->rx_tid[tid];
paddr_aligned = rx_tid->qbuf.paddr_aligned;
/* Update the tid queue if it is already setup */ /* Update the tid queue if it is already setup */
if (rx_tid->active) { if (rx_tid->active) {
paddr_aligned = rx_tid->qbuf.paddr_aligned;
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid, ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true); ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock); spin_unlock_bh(&ab->base_lock);
@ -1002,39 +1051,14 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid->ba_win_sz = ba_win_sz; rx_tid->ba_win_sz = ba_win_sz;
/* TODO: Optimize the memory allocation for qos tid based on ahsta = ath12k_sta_to_ahsta(peer->sta);
* the actual BA window size in REO tid update path. ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
*/
if (tid == HAL_DESC_REO_NON_QOS_TID)
hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
else
hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
ssn, pn_type);
paddr_aligned = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(ab->dev, paddr_aligned);
if (ret) { if (ret) {
spin_unlock_bh(&ab->base_lock); spin_unlock_bh(&ab->base_lock);
goto err_mem_free; ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
return ret;
} }
rx_tid->qbuf.vaddr = vaddr;
rx_tid->qbuf.paddr_aligned = paddr_aligned;
rx_tid->qbuf.size = hw_desc_sz;
rx_tid->active = true;
if (ab->hw_params->reoq_lut_support) { if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id /* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr. * and tid with qaddr.
@ -1054,11 +1078,6 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
ba_win_sz); ba_win_sz);
} }
return ret;
err_mem_free:
kfree(vaddr);
return ret; return ret;
} }

View file

@ -12,12 +12,6 @@
#define DP_MAX_NWIFI_HDR_LEN 30 #define DP_MAX_NWIFI_HDR_LEN 30
struct ath12k_reoq_buf {
void *vaddr;
dma_addr_t paddr_aligned;
u32 size;
};
struct ath12k_dp_rx_tid { struct ath12k_dp_rx_tid {
u8 tid; u8 tid;
u32 ba_win_sz; u32 ba_win_sz;