Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net: ena: Add support to changing tx_push_buf_len
The ENA driver allows for two distinct values for the number of bytes of the packet's payload that can be written directly to the device. For a value of 224 the driver turns on Large LLQ Header mode, in which the first 224 bytes of the packet's payload are written to the LLQ.

Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b0c59e5396
parent a416cb254d
4 changed files with 82 additions and 12 deletions
@@ -10,6 +10,10 @@
 /* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
 #define ENA_COMP_HEAD_THRESH 4
+/* we allow 2 DMA descriptors per LLQ entry */
+#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
+#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
+#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
 
 struct ena_com_tx_ctx {
 	struct ena_com_tx_meta ena_meta;

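The 224 mentioned in the commit message falls out of these defines. A minimal sketch of the arithmetic, assuming sizeof(struct ena_eth_io_tx_desc) is 16 bytes (so the two descriptors reserved per LLQ entry take 32 bytes of a 128- or 256-byte entry):

#include <stdio.h>

/* Assumed descriptor size; the real value comes from the ENA HW headers. */
#define TX_DESC_SIZE		16UL
/* Two DMA descriptors are reserved per LLQ entry, as in the define above. */
#define DESC_CHUNK_SIZE		(2 * TX_DESC_SIZE)

int main(void)
{
	printf("ENA_LLQ_HEADER       = %lu\n", 128UL - DESC_CHUNK_SIZE);	/* 96  */
	printf("ENA_LLQ_LARGE_HEADER = %lu\n", 256UL - DESC_CHUNK_SIZE);	/* 224 */
	return 0;
}

Under that assumption the two selectable push-buffer lengths are 96 and 224 bytes; from userspace they would be requested through the new tx-push-buf-len ring parameter (e.g. ethtool -G <iface> tx-push-buf-len 224, with an ethtool recent enough to expose it).
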
@@ -476,6 +476,19 @@ static void ena_get_ringparam(struct net_device *netdev,
 	ring->tx_max_pending = adapter->max_tx_ring_size;
 	ring->rx_max_pending = adapter->max_rx_ring_size;
+	if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+		bool large_llq_supported = adapter->large_llq_header_supported;
+
+		kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+		if (large_llq_supported)
+			kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER;
+		else
+			kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER;
+	} else {
+		kernel_ring->tx_push_buf_max_len = 0;
+		kernel_ring->tx_push_buf_len = 0;
+	}
+
 	ring->tx_pending = adapter->tx_ring[0].ring_size;
 	ring->rx_pending = adapter->rx_ring[0].ring_size;
 }

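In other words, the reporting added to ena_get_ringparam() boils down to the following. This is a stand-alone sketch with invented names (the driver fills struct kernel_ethtool_ringparam directly), again assuming 16-byte TX descriptors for the 96/224 values:

#include <stdbool.h>

struct push_buf_report {
	unsigned long len;	/* currently configured push-buffer length */
	unsigned long max_len;	/* largest length the device would accept  */
};

/* Mirrors the logic above: the fields are only meaningful when the TX queues
 * use device placement (LLQ); otherwise both read back as zero.
 */
static struct push_buf_report report_push_buf(bool llq_on, bool large_llq_supported,
					      unsigned long cur_header_size)
{
	struct push_buf_report r = { 0, 0 };

	if (llq_on) {
		r.len = cur_header_size;
		r.max_len = large_llq_supported ? 224UL : 96UL;
	}
	return r;
}
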
@@ -486,7 +499,8 @@ static int ena_set_ringparam(struct net_device *netdev,
 			     struct netlink_ext_ack *extack)
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
-	u32 new_tx_size, new_rx_size;
+	u32 new_tx_size, new_rx_size, new_tx_push_buf_len;
+	bool changed = false;
 
 	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
 			ENA_MIN_RING_SIZE : ring->tx_pending;

@@ -496,11 +510,45 @@ static int ena_set_ringparam(struct net_device *netdev,
 			ENA_MIN_RING_SIZE : ring->rx_pending;
 	new_rx_size = rounddown_pow_of_two(new_rx_size);
 
-	if (new_tx_size == adapter->requested_tx_ring_size &&
-	    new_rx_size == adapter->requested_rx_ring_size)
+	changed |= new_tx_size != adapter->requested_tx_ring_size ||
+		   new_rx_size != adapter->requested_rx_ring_size;
+
+	/* This value is ignored if LLQ is not supported */
+	new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+
+	/* Validate that the push buffer is supported on the underlying device */
+	if (kernel_ring->tx_push_buf_len) {
+		enum ena_admin_placement_policy_type placement;
+
+		new_tx_push_buf_len = kernel_ring->tx_push_buf_len;
+
+		placement = adapter->ena_dev->tx_mem_queue_type;
+		if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+			return -EOPNOTSUPP;
+
+		if (new_tx_push_buf_len != ENA_LLQ_HEADER &&
+		    new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) {
+			bool large_llq_sup = adapter->large_llq_header_supported;
+			char large_llq_size_str[40];
+
+			snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER);
+
+			NL_SET_ERR_MSG_FMT_MOD(extack,
+					       "Supported tx push buff values: [%lu%s]",
+					       ENA_LLQ_HEADER,
+					       large_llq_sup ? large_llq_size_str : "");
+
+			return -EINVAL;
+		}
+
+		changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size;
+	}
+
+	if (!changed)
 		return 0;
 
-	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
+	return ena_update_queue_params(adapter, new_tx_size, new_rx_size,
+				       new_tx_push_buf_len);
 }
 
 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)

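The acceptance rules this hunk implements can be summarized in a small stand-alone sketch (function name and flat parameters are invented for illustration; the 96/224 constants again assume 16-byte TX descriptors):

#include <errno.h>
#include <stdbool.h>

#define LLQ_HEADER		96UL	/* ENA_LLQ_HEADER under the 16-byte descriptor assumption       */
#define LLQ_LARGE_HEADER	224UL	/* ENA_LLQ_LARGE_HEADER under the 16-byte descriptor assumption */

/* 0 keeps the current setting, a request while LLQ is off is not supported,
 * and only the two entry-derived sizes pass the final check.
 */
static int check_tx_push_buf_len(unsigned long requested, bool llq_on)
{
	if (!requested)
		return 0;
	if (!llq_on)
		return -EOPNOTSUPP;
	if (requested != LLQ_HEADER && requested != LLQ_LARGE_HEADER)
		return -EINVAL;
	return 0;
}

Note that the extack message only advertises 224 when large LLQ headers are supported, while the size check itself accepts both values; per the comment added in ena_update_queue_params() below, validating the configuration remains the caller's job.
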
@@ -909,6 +957,7 @@ static int ena_set_tunable(struct net_device *netdev,
 static const struct ethtool_ops ena_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH_BUF_LEN,
 	.get_link_ksettings	= ena_get_link_ksettings,
 	.get_drvinfo		= ena_get_drvinfo,
 	.get_msglevel		= ena_get_msglevel,

@@ -2809,11 +2809,13 @@ static int ena_close(struct net_device *netdev)
 	return 0;
 }
 
-int ena_update_queue_sizes(struct ena_adapter *adapter,
-			   u32 new_tx_size,
-			   u32 new_rx_size)
+int ena_update_queue_params(struct ena_adapter *adapter,
+			    u32 new_tx_size,
+			    u32 new_rx_size,
+			    u32 new_llq_header_len)
 {
-	bool dev_was_up;
+	bool dev_was_up, large_llq_changed = false;
+	int rc = 0;
 
 	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 	ena_close(adapter->netdev);

@@ -2823,7 +2825,21 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 				  0,
 				  adapter->xdp_num_queues +
 				  adapter->num_io_queues);
-	return dev_was_up ? ena_up(adapter) : 0;
+
+	large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
+			    ENA_ADMIN_PLACEMENT_POLICY_DEV;
+	large_llq_changed &=
+		new_llq_header_len != adapter->ena_dev->tx_max_header_size;
+
+	/* a check that the configuration is valid is done by caller */
+	if (large_llq_changed) {
+		adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
+
+		ena_destroy_device(adapter, false);
+		rc = ena_restore_device(adapter);
+	}
+
+	return dev_was_up && !rc ? ena_up(adapter) : rc;
 }
 
 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)

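Two things are worth noting about the hunk above: a push-buffer change only triggers the destroy/restore cycle when the device is actually in LLQ mode and the requested length differs from the one currently programmed, and the two statements that build large_llq_changed are simply a split-up logical AND. A minimal sketch of the equivalent predicate (helper name and parameters invented for illustration):

#include <stdbool.h>

/* Equivalent to: large_llq_changed = (placement is DEV) && (new length differs). */
static bool llq_header_len_changed(bool placement_is_dev,
				   unsigned int new_len, unsigned int cur_len)
{
	return placement_is_dev && new_len != cur_len;
}
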
@@ -396,9 +396,10 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_update_hw_stats(struct ena_adapter *adapter);
 
-int ena_update_queue_sizes(struct ena_adapter *adapter,
-			   u32 new_tx_size,
-			   u32 new_rx_size);
+int ena_update_queue_params(struct ena_adapter *adapter,
+			    u32 new_tx_size,
+			    u32 new_rx_size,
+			    u32 new_llq_header_len);
 
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);