// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2024 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_CONF_SET,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
	[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};

/* add a new TLV node, returning it so it can be modified */
static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
					     struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
	if (!node)
		return NULL;

	memcpy(&node->tlv, tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	return &node->tlv;
}

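/* check that a debug TLV's version is within the supported range for its type */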
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

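/* validate a debug info TLV and queue it on the debug info list */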
static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	/* we use this as a string, ensure input was NUL terminated */
	if (strnlen(debug_info->debug_cfg_name,
		    sizeof(debug_info->debug_cfg_name)) ==
			sizeof(debug_info->debug_cfg_name))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
		return -ENOMEM;
	return 0;
}

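/* validate a buffer allocation TLV and store its configuration per allocation id */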
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
	    alloc->req_size == 0) {
		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);
	return -EINVAL;
}

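/* validate a host command TLV's time point and queue it on that time point's hcmd list */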
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands can not be sent in early time point since the FW
	 * is not ready
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
		return -ENOMEM;
	return 0;
}

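/* validate a region TLV and store it in the active regions table,
 * replacing any previously stored region with the same id
 */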
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u8 type = reg->type;
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
	 * The higher part of the ID from version 2 is debug policy.
	 * The id will be only lsb 16 bits, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) >= 2)
		id &= IWL_FW_INI_REGION_ID_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
	IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
		     IWL_FW_INI_MAX_NAME, reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
		trans->dbg.imr_data.sram_addr =
			le32_to_cpu(reg->internal_buffer.base_addr);
		trans->dbg.imr_data.sram_size =
			le32_to_cpu(reg->internal_buffer.size);
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

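/* validate a trigger TLV and add it to its time point's trigger list;
 * a zero occurrences count is treated as unlimited (-1)
 */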
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	u32 tp = le32_to_cpu(trig->time_point);
	u32 rf = le32_to_cpu(trig->reset_fw);
	struct iwl_ucode_tlv *new_tlv;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	IWL_DEBUG_FW(trans,
		     "WRT: time point %u for trigger TLV with reset_fw %u\n",
		     tp, rf);
	trans->dbg.last_tp_resetfw = 0xFF;

	new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	if (!new_tlv)
		return -ENOMEM;

	if (!le32_to_cpu(trig->occurrences)) {
		struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;

		new_trig->occurrences = cpu_to_le32(-1);
	}

	return 0;
}

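/* validate a config set TLV and queue it on its time point's config list */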
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
	u32 tp = le32_to_cpu(conf_set->time_point);
	u32 type = le32_to_cpu(conf_set->set_type);

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid time point %u for config set TLV\n", tp);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
	    type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid config set type %u for config set TLV\n", type);
		return -EINVAL;
	}

	if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
		return -ENOMEM;
	return 0;
}

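/* per-type parse handlers, indexed by enum iwl_dbg_tlv_type */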
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      const struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
	[IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set,
};

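/* parse one debug TLV: check domain and version, dispatch to the
 * type-specific handler, and update the ini config state accordingly
 */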
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type;
	u32 tlv_idx;
	u32 domain;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*hdr))
		return;

	type = le32_to_cpu(tlv->type);
	tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	domain = le32_to_cpu(hdr->domain);

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_WARN(trans,
			 "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			 type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		timer_shutdown_sync(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

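/* free all DMA fragments of the given allocation id and reset its state */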
static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->config_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

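/* walk a binary blob of debug TLVs and parse each one as an external TLV */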
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	const struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

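/* request the external debug configuration file (yoyo bin) and parse it */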
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	const char *yoyo_bin = "iwl-debug-yoyo.bin";
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
		return;

	res = firmware_request_nowarn(&fw, yoyo_bin, dev);
	IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);

	if (res)
		return;

	trans->dbg.yoyo_bin_loaded = true;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
		INIT_LIST_HEAD(&tp->config_list);
	}
}

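/* allocate one coherent DMA fragment, halving the page count on failure;
 * returns the number of pages actually allocated or a negative error
 */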
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

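/* allocate (or clear, if already allocated) the DRAM fragments that back
 * the firmware monitor for the given allocation id
 */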
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags) {
		for (i = 0; i < fw_mon->num_frags; i++)
			memset(fw_mon->frags[i].block, 0,
			       fw_mon->frags[i].size);
		return 0;
	}

	if (fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	} else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
		   alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
		return -EIO;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

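/* report the allocated DRAM fragments of one allocation id to the
 * firmware via BUFFER_ALLOCATION host commands
 */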
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
			.flags = CMD_SEND_IN_RFKILL,
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}
		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

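/* fill one allocation id's fragment entries in the iwl_dram_info
 * structure stored in the first DBGC1 fragment
 */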
static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
				   enum iwl_fw_ini_allocation_id alloc_id,
				   struct iwl_dram_info *dram_info)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_frags;
	int j, fw_mon_idx = 0;
	struct iwl_buf_alloc_cmd *data;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH) {
		IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n",
			     alloc_id);
		return -1;
	}

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return -1;

	num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	data = &dram_info->dram_frags[alloc_id - 1];
	data->alloc_id = cpu_to_le32(alloc_id);
	data->num_frags = cpu_to_le32(num_frags);
	data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);

	IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
		     alloc_id, num_frags);

	for (j = 0; j < num_frags; j++) {
		struct iwl_buf_alloc_frag *frag = &data->frags[j];
		struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];

		frag->addr = cpu_to_le64(fw_mon_frag->physical);
		frag->size = cpu_to_le32(fw_mon_frag->size);
		IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
		IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
			     j, fw_mon_frag->physical,
			     fw_mon_frag->size);
	}
	return 0;
}

static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
	int ret, i;
	bool dram_alloc = false;
	struct iwl_dram_data *frags =
		&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
	struct iwl_dram_info *dram_info;

	if (!frags || !frags->block)
		return;

	dram_info = frags->block;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	memset(dram_info, 0, sizeof(*dram_info));

	for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	     i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location ==
		    IWL_FW_INI_LOCATION_INVALID)
			continue;

		ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
		if (!ret)
			dram_alloc = true;
		else
			IWL_INFO(fwrt,
				 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
				 i, ret);
	}

	if (dram_alloc) {
		dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
		dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
	}
}

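/* send every host command queued on the given time point's hcmd list */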
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

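/* apply every config set TLV on the list: write address/value pairs to
 * the periphery, device memory or CSRs, program the DBGC1 DRAM address,
 * or latch the HWM debug token, depending on the set type
 */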
static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
				     struct list_head *conf_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, conf_list, list) {
		struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
		u32 count, address, value;
		u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
		u32 type = le32_to_cpu(config_list->set_type);
		u32 offset = le32_to_cpu(config_list->addr_offset);

		switch (type) {
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
			if (!iwl_trans_grab_nic_access(fwrt->trans)) {
				IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
				IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
				continue;
			}
			IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len);
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_prph(fwrt->trans, address + offset, value);
			}
			iwl_trans_release_nic_access(fwrt->trans);
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_mem32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_write32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
			struct iwl_dbgc1_info dram_info = {};
			struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
			__le64 dram_base_addr;
			__le32 dram_size;
			u64 dram_addr;
			u32 ret;

			if (!frags)
				break;

			dram_base_addr = cpu_to_le64(frags->physical);
			dram_size = cpu_to_le32(frags->size);
			dram_addr = le64_to_cpu(dram_base_addr);

			IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
				     dram_addr, le32_to_cpu(dram_size));
			IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
				     le32_to_cpu(config_list->addr_offset));
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				dram_info.dbgc1_add_lsb =
					cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
				dram_info.dbgc1_add_msb =
					cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
				dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
				ret = iwl_trans_write_mem(fwrt->trans,
							  address + offset, &dram_info, 4);
				if (ret) {
					IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
					break;
				}
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
			u32 debug_token_config =
				le32_to_cpu(config_list->addr_val[0].value);

			IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
				     debug_token_config);
			fwrt->trans->dbg.ucode_preset = debug_token_config;
			break;
		}
		default:
			break;
		}
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger data was not given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

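/* return true if every data dword of the new trigger already appears
 * in the old trigger's data
 */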
static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
				   const struct iwl_ucode_tlv *old)
{
	const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
	const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
	const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

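/* merge a new trigger TLV into an existing active trigger node,
 * appending or overriding its data, configuration and regions mask
 * according to the TLV's apply policy
 */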
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
			return -ENOMEM;
		return 0;
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

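/* fire all triggers active at a time point: collect debug data for each
 * matching trigger and decide, from its reset_fw mode, whether the
 * firmware must be restarted afterwards
 */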
static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;
		u32 tp = le32_to_cpu(dump_data.trig->time_point);

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
				if (ret)
					return ret;

				break;
			}
		}

		fwrt->trans->dbg.restart_required = false;
		IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
			     tp, dump_data.trig->reset_fw);
		IWL_DEBUG_FW(fwrt,
			     "WRT: restart_required %d, last_tp_resetfw %d\n",
			     fwrt->trans->dbg.restart_required,
			     fwrt->trans->dbg.last_tp_resetfw);

		if (fwrt->trans->trans_cfg->device_family ==
		    IWL_DEVICE_FAMILY_9000) {
			fwrt->trans->dbg.restart_required = true;
		} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
			   fwrt->trans->dbg.last_tp_resetfw ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw = 0xFF;
			IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
			IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
			fwrt->trans->dbg.restart_required = true;
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: stop only and no reload firmware\n");
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw =
				le32_to_cpu(dump_data.trig->reset_fw);
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_NOTHING) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: nothing need to be done after debug collection\n");
		} else {
			IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
				le32_to_cpu(dump_data.trig->reset_fw));
		}
	}
	return 0;
}

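/* on first use, build the active trigger lists and pick the debug
 * destination; then allocate the DRAM fragments for every allocation
 * using that destination, disabling regions whose allocation failed
 */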
void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;
	u32 failed_alloc = 0;

	if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Generating active triggers list, domain 0x%x\n",
			     fwrt->trans->dbg.domains_bitmap);

		for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
			struct iwl_dbg_tlv_time_point_data *tp =
				&fwrt->trans->dbg.time_point[i];

			iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
		}
	} else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) {
		/* For DRAM, go through the loop below to clear all the buffers
		 * properly on restart, otherwise garbage may be left there and
		 * leak into new debug dumps.
		 */
		return;
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID) {
			failed_alloc |= BIT(i);
			continue;
		}

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);

		if (ret) {
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
			failed_alloc |= BIT(i);
		}
	}

	if (!failed_alloc)
		return;

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
		struct iwl_fw_ini_region_tlv *reg;
		struct iwl_ucode_tlv **active_reg =
			&fwrt->trans->dbg.active_regions[i];
		u32 reg_type;

		if (!*active_reg) {
			fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
			continue;
		}

		reg = (void *)(*active_reg)->data;
		reg_type = reg->type;

		if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
		    !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
			continue;

		IWL_DEBUG_FW(fwrt,
			     "WRT: removing allocation id %d from region id %d\n",
			     le32_to_cpu(reg->dram_alloc_id), i);

		failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
		fwrt->trans->dbg.unsupported_region_msk |= BIT(i);

		kfree(*active_reg);
		*active_reg = NULL;
	}
}

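/* run the debug actions attached to a firmware time point: send queued
 * host commands, apply config sets and fire the active triggers
 */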
void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			     enum iwl_fw_ini_time_point tp_id,
			     union iwl_dbg_tlv_tp_data *tp_data,
			     bool sync)
{
	struct list_head *hcmd_list, *trig_list, *conf_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
	conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_update_drams(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);