wifi: iwlwifi: use bc entries instead of bc table also for pre-ax210

iwlagn_scd_bc_tbl is used for pre-ax210 devices,
and iwl_gen3_bc_tbl_entry is used for ax210 and later. But there is no
difference between the 22000 version and the AX210+ one.

In order to unify the two, as a first step make iwlagn_scd_bc_tbl an
entry as well, and adjust the code. A later patch will unify the two
structures.
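To illustrate the shape of the change, a minimal self-contained sketch
(not the driver source: __le16 is spelled as uint16_t, and the
TFD_QUEUE_BC_SIZE value here is a placeholder):

#include <stdint.h>

#define TFD_QUEUE_BC_SIZE 320   /* placeholder; the real value lives in the driver headers */

/* Before: one struct wraps a whole per-queue byte count table. */
struct iwlagn_scd_bc_tbl_old {
        uint16_t tfd_offset[TFD_QUEUE_BC_SIZE];
};

/* After: one struct per entry, mirroring iwl_gen3_bc_tbl_entry, so a
 * table becomes an array of entries and its size is computed as
 * sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE.
 */
struct iwlagn_scd_bc_tbl_entry {
        uint16_t tfd_offset;
};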

Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Link: https://patch.msgid.link/20250511195137.645cd82ebf48.Iaa7e88179372d60ef31157e379737b5babe54012@changeid
Miri Korenblit 2025-05-11 19:53:14 +03:00
parent f74cb4d02c
commit 6204d5130a
4 changed files with 16 additions and 13 deletions

@@ -717,7 +717,7 @@ struct iwl_tfh_tfd {
 /* Fixed (non-configurable) rx data from phy */
 
 /**
- * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * struct iwlagn_scd_bc_tbl_entry - scheduler byte count table entry
  *	base physical address provided by SCD_DRAM_BASE_ADDR
  * For devices up to 22000:
  * @tfd_offset:
@@ -729,8 +729,8 @@ struct iwl_tfh_tfd {
  *	12-13 - number of 64 byte chunks
  *	14-16 - reserved
  */
-struct iwlagn_scd_bc_tbl {
-	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+struct iwlagn_scd_bc_tbl_entry {
+	__le16 tfd_offset;
 } __packed;
 
 /**

@@ -3839,7 +3839,8 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		trans_pcie->txqs.bc_tbl_size =
 			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_AX210;
 	else
-		trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+		trans_pcie->txqs.bc_tbl_size =
+			sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE;
 
 	/*
 	 * For gen2 devices, we use a single allocation for each byte-count
 	 * table, but they're pretty small (1k) so use a DMA pool that we

@@ -587,12 +587,12 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
 		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
 		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
 	} else {
-		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+		struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
 
 		len = DIV_ROUND_UP(len, 4);
 		WARN_ON(len > 0xFFF);
 		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-		scd_bc_tbl->tfd_offset[idx] = bc_ent;
+		scd_bc_tbl[idx].tfd_offset = bc_ent;
 	}
 }
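The pre-AX210 entry packing used above stores the length in dwords in
the low bits and the number of 64-byte fetch chunks at bit 12 (AX210+
keeps the length in bytes and shifts the chunks to bit 14). A minimal
standalone sketch of that packing, with the driver's cpu_to_le16()
conversion elided:

#include <stdint.h>

/* Pack a pre-AX210 byte count entry: length in dwords in bits 0-11
 * (guarded by WARN_ON(len > 0xFFF) in the driver), number of 64-byte
 * fetch chunks in bits 12-13.
 */
static uint16_t bc_entry_pre_ax210(uint32_t len_bytes, uint16_t num_fetch_chunks)
{
        uint16_t len_dwords = (len_bytes + 3) / 4;      /* DIV_ROUND_UP(len, 4) */

        return len_dwords | (num_fetch_chunks << 12);
}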

@@ -796,6 +796,8 @@ error:
 	return -ENOMEM;
 }
 
+#define BC_TABLE_SIZE (sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
 /*
  * iwl_pcie_tx_alloc - allocate TX context
  * Allocate all Tx DMA structures and initialize them
@@ -810,7 +812,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
 		return -EINVAL;
 
-	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
+	bc_tbls_size *= BC_TABLE_SIZE;
 
 	/*It is not allowed to alloc twice, so warn when this happens.
 	 * We cannot rely on the previous allocation, so free and fail */
@@ -2065,7 +2067,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 					     int num_tbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl;
 	int write_ptr = txq->write_ptr;
 	int txq_id = txq->id;
 	u8 sec_ctl = 0;
@@ -2099,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
-	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+	scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
 
 	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+		scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
 			bc_ent;
 }
@@ -2312,7 +2314,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
 					    int read_ptr)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+	struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
 	int txq_id = txq->id;
 	u8 sta_id = 0;
 	__le16 bc_ent;
@@ -2326,10 +2328,10 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
 
-	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+	scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
 
 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
-		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+		scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
 			bc_ent;
 }
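For the gen1 paths above, the two-dimensional access
scd_bc_tbl[txq_id].tfd_offset[ptr] becomes an index into one flat
array of entries. A standalone sketch of that layout (the stride here
is a hypothetical count of entries per queue, whereas the patch's
BC_TABLE_SIZE is a size in bytes):

#include <stdint.h>

#define ENTRIES_PER_QUEUE 320   /* hypothetical per-queue stride, in entries */

struct bc_tbl_entry {
        uint16_t tfd_offset;
};

/* One contiguous allocation holds every queue's byte count table;
 * queue q's slot p lives at index q * ENTRIES_PER_QUEUE + p.
 */
static void set_byte_count(struct bc_tbl_entry *tbl, int txq_id, int ptr,
                           uint16_t bc_ent)
{
        tbl[txq_id * ENTRIES_PER_QUEUE + ptr].tfd_offset = bc_ent;
}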