// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define MAX_TX_BUDGET			16
#define MAX_RX_BUDGET			16

#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

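/* Each CLDMA queue drives a fixed-length ring of GPDs (descriptors); MAX_TX_BUDGET and
 * MAX_RX_BUDGET above are the ring lengths, i.e. the number of requests that can be
 * outstanding per queue at any time.
 */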
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{
	queue->dir = tx_rx;
	queue->index = index;
	queue->md_ctrl = md_ctrl;
	queue->tr_ring = NULL;
	queue->tr_done = NULL;
	queue->tx_next = NULL;
}

static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				    enum mtk_txrx tx_rx, unsigned int index)
{
	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->ring_lock);
}

static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{
	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
}

static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{
	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
}

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}

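/* Collect RX GPDs that the hardware has released (HWO cleared): hand the payload to
 * the queue's recv_skb() callback, then refill the slot with a freshly mapped skb so
 * the ring never runs dry.
 */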
static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

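			/* Some of these registers are only 32-bit aligned, so read
			 * the 64-bit value as two 32-bit halves (ioread64_lo_hi) to
			 * avoid alignment faults on 64-bit platforms.
			 */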
			gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
						  REG_CLDMA_DL_CURRENT_ADDRL_0 +
						  queue->index * sizeof(u64));
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = queue->recv_skb(queue, skb);
		/* Break processing, will try again later */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}

static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}

static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

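/* Reclaim completed TX GPDs starting from tr_done: unmap the buffer, free the skb and
 * return budget to senders blocked in t7xx_cldma_send_skb().
 */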
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	dma_addr_t dma_free;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}

static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Check current processing TGPD, 64-bit address is in a table by Q index */
		ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					      queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

	if (l2_tx_int & BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
	struct cldma_request *req_cur, *req_next;

	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
		if (req_cur->mapped_buff && req_cur->skb) {
			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
					 ring->pkt_size, tx_rx);
			req_cur->mapped_buff = 0;
		}

		dev_kfree_skb_any(req_cur->skb);

		if (req_cur->gpd)
			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);

		list_del(&req_cur->entry);
		kfree(req_cur);
	}
}

static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{
	struct cldma_request *req;
	int val;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd)
		goto err_free_req;

	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
	if (val)
		goto err_free_pool;

	return req;

err_free_pool:
	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);

err_free_req:
	kfree(req);

	return NULL;
}

static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{
	struct cldma_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd) {
		kfree(req);
		return NULL;
	}

	return req;
}

static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

/**
 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 * @queue: Pointer to the queue structure.
 *
 * Called with ring_lock (unless called during initialization phase)
 */
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{
	struct cldma_request *req;

	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
	queue->tr_done = req;
	queue->budget = queue->tr_ring->length;

	if (queue->dir == MTK_TX)
		queue->tx_next = req;
	else
		queue->rx_refill = req;
}

static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_RX;
	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_TX;
	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

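/* Demultiplex the L2 interrupt status: per-queue TX/RX completion and queue-empty bits
 * are dispatched to the corresponding workqueues, with their interrupts kept masked
 * until the work item has drained the ring and re-enables them.
 */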
static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* L2 raw interrupt status */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 TX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 RX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}

static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned int tx_active;
	unsigned int rx_active;

	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
		return false;

	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);

	return tx_active || rx_active;
}

/**
 * t7xx_cldma_stop() - Stop CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 * Clear status registers.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from polling cldma_queues_active.
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	bool active;
	int i, ret;

	md_ctrl->rxq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	md_ctrl->txq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);

	if (md_ctrl->is_late_init) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			flush_work(&md_ctrl->txq[i].cldma_work);

		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			flush_work(&md_ctrl->rxq[i].cldma_work);
	}

	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
	if (ret)
		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);

	return ret;
}

static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
	int i;

	if (!md_ctrl->is_late_init)
		return;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);

	dma_pool_destroy(md_ctrl->gpd_dmapool);
	md_ctrl->gpd_dmapool = NULL;
	md_ctrl->is_late_init = false;
}

void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}

/**
 * t7xx_cldma_start() - Start CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Set TX/RX start address.
 * Start all RX queues and enable L2 interrupt.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->is_late_init) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		int i;

		t7xx_cldma_enable_irq(md_ctrl);

		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
			if (md_ctrl->txq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->txq[i].tr_done->gpd_addr,
							     MTK_TX);
		}

		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
			if (md_ctrl->rxq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->rxq[i].tr_done->gpd_addr,
							     MTK_RX);
		}

		/* Enable L2 interrupt */
		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
		t7xx_cldma_hw_start(hw_info);
		md_ctrl->txq_started = 0;
		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

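/* Return every TX GPD to software ownership and free any skbs still queued on the
 * ring, resetting the queue pointers to their initial state.
 */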
static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}

static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}

void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	int i;

	if (tx_rx == MTK_TX) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			t7xx_cldma_clear_txq(md_ctrl, i);
	} else {
		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			t7xx_cldma_clear_rxq(md_ctrl, i);
	}
}

void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

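/* Fill a TX GPD for the given skb: map the payload, program the buffer pointer and
 * length, and hand the descriptor to the hardware by setting HWO (only while the
 * queue is active).
 */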
static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* This lock must cover TGPD setting, as even without a resume operation,
	 * CLDMA can send next HWO=1 if last TGPD just finished.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;
	return 0;
}

/* Called with cldma_lock */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* Check whether the device was powered off (CLDMA start address is not set) */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}

/**
 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 * @queue: CLDMA queue.
 * @recv_skb: Receiving skb callback.
 */
void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
	queue->recv_skb = recv_skb;
}

/**
 * t7xx_cldma_send_skb() - Send control data to modem.
 * @md_ctrl: CLDMA context structure.
 * @qno: Queue number.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Allocation failure.
 * * -EINVAL	- Invalid queue request.
 * * -EIO	- Queue is not active.
 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{
	struct cldma_request *tx_req;
	struct cldma_queue *queue;
	unsigned long flags;
	int ret;

	if (qno >= CLDMA_TXQ_NUM)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(md_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
	queue = &md_ctrl->txq[qno];

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (!(md_ctrl->txq_active & BIT(qno))) {
		ret = -EIO;
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		goto allow_sleep;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	do {
		spin_lock_irqsave(&queue->ring_lock, flags);
		tx_req = queue->tx_next;
		if (queue->budget > 0 && !tx_req->skb) {
			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;

			queue->budget--;
			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
			spin_unlock_irqrestore(&queue->ring_lock, flags);

			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
				ret = -ETIMEDOUT;
				break;
			}

			/* Protect the access to the modem for queues operations (resume/start)
			 * which access shared locations by all the queues.
			 * cldma_lock is independent of ring_lock which is per queue.
			 */
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

			break;
		}
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
			ret = -ETIMEDOUT;
			break;
		}

		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		}

		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
	} while (!ret);

allow_sleep:
	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
	return ret;
}

static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	int qno;

	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
		md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
	}

	md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;

	for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
		md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;

	if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
		md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
					t7xx_port_proxy_recv_skb_from_dedicated_queue);
	}
}

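/* Allocate the GPD DMA pool and build all TX/RX rings. This runs whenever a queue
 * configuration is (re)applied via t7xx_cldma_switch_cfg().
 */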
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}

static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
	return addr + phy_addr - addr_trs1;
}

static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	hw_info->hw_mode = MODE_BIT_64;

	if (md_ctrl->hif_id == CLDMA_ID_MD) {
		phy_ao_base = CLDMA1_AO_BASE;
		phy_pd_base = CLDMA1_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA1_INT;
	} else {
		phy_ao_base = CLDMA0_AO_BASE;
		phy_pd_base = CLDMA0_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA0_INT;
	}

	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}

static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct cldma_ctrl *md_ctrl;
	int qno;

	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
	if (!md_ctrl)
		return -ENOMEM;

	md_ctrl->t7xx_dev = t7xx_dev;
	md_ctrl->dev = dev;
	md_ctrl->hif_id = hif_id;
	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
		md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;

	t7xx_hw_info_init(md_ctrl);
	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
	return 0;
}

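/* Power management callbacks: the early-resume hook restores CLDMA register state and
 * queue start addresses, resume re-enables TX, and the matching suspend hooks below
 * quiesce RX and TX respectively.
 */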
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);

	return 0;
}

static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}

static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
	if (!md_ctrl->pm_entity)
		return -ENOMEM;

	md_ctrl->pm_entity->entity_param = md_ctrl;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
	else
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;

	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;

	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}

static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
	if (!md_ctrl->pm_entity)
		return -EINVAL;

	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
	kfree(md_ctrl->pm_entity);
	md_ctrl->pm_entity = NULL;
	return 0;
}

void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

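/* Top-half ISR: mask the CLDMA interrupt at the PCIe MAC, run the demux callback, then
 * clear the status and unmask the interrupt again.
 */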
static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}

static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{
	int i;

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		if (md_ctrl->txq[i].worker) {
			destroy_workqueue(md_ctrl->txq[i].worker);
			md_ctrl->txq[i].worker = NULL;
		}
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		if (md_ctrl->rxq[i].worker) {
			destroy_workqueue(md_ctrl->rxq[i].worker);
			md_ctrl->rxq[i].worker = NULL;
		}
	}
}

/**
 * t7xx_cldma_init() - Initialize CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Allocate and initialize device power management entity.
 * Initialize HIF TX/RX queue structure.
 * Register CLDMA callback ISR with PCIe driver.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from failure sub-initializations.
 */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int ret, i;

	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	md_ctrl->is_late_init = false;

	ret = t7xx_cldma_pm_init(md_ctrl);
	if (ret)
		return ret;

	spin_lock_init(&md_ctrl->cldma_lock);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		md_ctrl->txq[i].worker =
			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
						WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
						md_ctrl->hif_id, i);
		if (!md_ctrl->txq[i].worker)
			goto err_workqueue;

		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);

		md_ctrl->rxq[i].worker =
			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
						WQ_MEM_RECLAIM,
						md_ctrl->hif_id, i);
		if (!md_ctrl->rxq[i].worker)
			goto err_workqueue;
	}

	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	return 0;

err_workqueue:
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
	return -ENOMEM;
}

void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_adjust_config(md_ctrl, cfg_id);
	t7xx_cldma_late_init(md_ctrl);
}

void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}