2019-10-24 01:11:17 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/* Copyright (c) 2019, Intel Corporation. */
|
|
|
|
|
2020-05-20 21:20:57 +02:00
|
|
|
#include <net/xdp_sock_drv.h>
|
2019-10-24 01:11:17 -07:00
|
|
|
#include "ice_base.h"
|
2020-05-15 17:54:59 -07:00
|
|
|
#include "ice_lib.h"
|
2019-10-24 01:11:17 -07:00
|
|
|
#include "ice_dcb_lib.h"
|
2022-02-22 16:26:49 -08:00
|
|
|
#include "ice_sriov.h"
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
|
|
|
|
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
|
|
|
|
*
|
|
|
|
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
|
|
|
|
*/
|
|
|
|
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
|
|
|
|
{
|
2020-05-15 17:36:38 -07:00
|
|
|
unsigned int offset, i;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
mutex_lock(qs_cfg->qs_mutex);
|
|
|
|
offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
|
|
|
|
0, qs_cfg->q_count, 0);
|
|
|
|
if (offset >= qs_cfg->pf_map_size) {
|
|
|
|
mutex_unlock(qs_cfg->qs_mutex);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
|
|
|
|
for (i = 0; i < qs_cfg->q_count; i++)
|
2020-05-07 17:41:05 -07:00
|
|
|
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
|
2019-10-24 01:11:17 -07:00
|
|
|
mutex_unlock(qs_cfg->qs_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
|
|
|
|
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
|
|
|
|
*
|
|
|
|
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
|
|
|
|
*/
|
|
|
|
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
|
|
|
|
{
|
2020-05-15 17:36:38 -07:00
|
|
|
unsigned int i, index = 0;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
mutex_lock(qs_cfg->qs_mutex);
|
|
|
|
for (i = 0; i < qs_cfg->q_count; i++) {
|
|
|
|
index = find_next_zero_bit(qs_cfg->pf_map,
|
|
|
|
qs_cfg->pf_map_size, index);
|
|
|
|
if (index >= qs_cfg->pf_map_size)
|
|
|
|
goto err_scatter;
|
|
|
|
set_bit(index, qs_cfg->pf_map);
|
2020-05-07 17:41:05 -07:00
|
|
|
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
mutex_unlock(qs_cfg->qs_mutex);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
err_scatter:
|
|
|
|
for (index = 0; index < i; index++) {
|
|
|
|
clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
|
|
|
|
qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
|
|
|
|
}
|
|
|
|
mutex_unlock(qs_cfg->qs_mutex);
|
|
|
|
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
|
|
|
|
* @pf: the PF being configured
|
|
|
|
* @pf_q: the PF queue
|
|
|
|
* @ena: enable or disable state of the queue
|
|
|
|
*
|
|
|
|
* This routine will wait for the given Rx queue of the PF to reach the
|
|
|
|
* enabled or disabled state.
|
|
|
|
* Returns -ETIMEDOUT in case of failing to reach the requested state after
|
|
|
|
* multiple retries; else will return 0 in case of success.
|
|
|
|
*/
|
|
|
|
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
|
|
|
|
if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
|
|
|
|
QRX_CTRL_QENA_STAT_M))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
usleep_range(20, 40);
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set default value for ITR setting associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	int err;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;
	/* mark as "no IRQ allocated" until one is assigned below; the free
	 * path checks irq.index < 0 to decide whether to release an IRQ
	 */
	q_vector->irq.index = -ENOENT;

	if (vsi->type == ICE_VSI_VF) {
		/* VF VSIs don't get a PF IRQ here; only compute the VF
		 * relative register index and skip IRQ/affinity/NAPI setup
		 */
		ice_calc_vf_reg_idx(vsi->vf, q_vector);
		goto out;
	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
		/* VF control VSIs share a single IRQ: reuse the one already
		 * held by another VF's control VSI if present
		 */
		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);

		if (ctrl_vsi) {
			if (unlikely(!ctrl_vsi->q_vectors)) {
				err = -ENOENT;
				goto err_free_q_vector;
			}

			q_vector->irq = ctrl_vsi->q_vectors[0]->irq;
			goto skip_alloc;
		}
	}

	q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc);
	if (q_vector->irq.index < 0) {
		err = -ENOMEM;
		goto err_free_q_vector;
	}

skip_alloc:
	q_vector->reg_idx = q_vector->irq.index;
	q_vector->vf_reg_idx = q_vector->irq.index;

	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases with register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;

err_free_q_vector:
	kfree(q_vector);

	return err;
}
|
|
|
|
|
|
|
|
/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	/* detach every Tx/Rx ring from this vector and clear its netdev
	 * queue<->NAPI association
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
				   NULL);
		tx_ring->q_vector = NULL;
	}
	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
				   NULL);
		rx_ring->q_vector = NULL;
	}

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	/* release MSIX interrupt if q_vector had interrupt allocated */
	if (q_vector->irq.index < 0)
		goto free_q_vector;

	/* only free last VF ctrl vsi interrupt: VF control VSIs share one
	 * IRQ, so keep it as long as another VF control VSI still exists
	 */
	if (vsi->type == ICE_VSI_CTRL && vsi->vf &&
	    ice_get_vf_ctrl_vsi(pf, vsi))
		goto free_q_vector;

	ice_free_irq(pf, q_vector->irq);

free_q_vector:
	kfree(q_vector);
	vsi->q_vectors[v_idx] = NULL;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
|
|
|
|
* @hw: board specific structure
|
|
|
|
*/
|
|
|
|
static void ice_cfg_itr_gran(struct ice_hw *hw)
|
|
|
|
{
|
|
|
|
u32 regval = rd32(hw, GLINT_CTL);
|
|
|
|
|
|
|
|
/* no need to update global register if ITR gran is already set */
|
|
|
|
if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
|
2023-12-05 17:01:12 -08:00
|
|
|
(FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
|
|
|
|
(FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
|
|
|
|
(FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
|
|
|
|
(FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
|
2019-10-24 01:11:17 -07:00
|
|
|
return;
|
|
|
|
|
2023-12-05 17:01:05 -08:00
|
|
|
regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
|
|
|
|
FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
|
|
|
|
FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
|
|
|
|
FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
|
2019-10-24 01:11:17 -07:00
|
|
|
wr32(hw, GLINT_CTL, regval);
|
|
|
|
}
|
|
|
|
|
2019-10-24 01:11:18 -07:00
|
|
|
/**
|
2021-08-19 13:59:58 +02:00
|
|
|
* ice_calc_txq_handle - calculate the queue handle
|
2019-10-24 01:11:18 -07:00
|
|
|
* @vsi: VSI that ring belongs to
|
|
|
|
* @ring: ring to get the absolute queue index
|
|
|
|
* @tc: traffic class number
|
|
|
|
*/
|
2021-08-19 13:59:58 +02:00
|
|
|
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
|
2019-10-24 01:11:18 -07:00
|
|
|
{
|
2020-02-13 13:31:26 -08:00
|
|
|
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
|
2019-11-04 09:38:56 -08:00
|
|
|
|
2021-10-15 16:35:15 -07:00
|
|
|
if (ring->ch)
|
|
|
|
return ring->q_index - ring->ch->base_q;
|
|
|
|
|
2019-10-24 01:11:18 -07:00
|
|
|
/* Idea here for calculation is that we subtract the number of queue
|
|
|
|
* count from TC that ring belongs to from it's absolute queue index
|
|
|
|
* and as a result we get the queue's index within TC.
|
|
|
|
*/
|
|
|
|
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
|
|
|
|
}
|
|
|
|
|
2021-03-02 10:12:02 -08:00
|
|
|
/**
|
|
|
|
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
|
|
|
|
* @ring: The Tx ring to configure
|
|
|
|
*
|
|
|
|
* This enables/disables XPS for a given Tx descriptor ring
|
|
|
|
* based on the TCs enabled for the VSI that ring belongs to.
|
|
|
|
*/
|
2021-08-19 13:59:58 +02:00
|
|
|
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
|
2021-03-02 10:12:02 -08:00
|
|
|
{
|
|
|
|
if (!ring->q_vector || !ring->netdev)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* We only initialize XPS once, so as not to overwrite user settings */
|
|
|
|
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
|
|
|
|
return;
|
|
|
|
|
|
|
|
netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
|
|
|
|
ring->q_index);
|
|
|
|
}
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	/* descriptor base address is programmed shifted right by
	 * ICE_TLAN_CTX_BASE_S
	 */
	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		/* channel (ADQ) rings are programmed as VMQ type */
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		/* unsupported VSI type: leave the context unconfigured */
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}
|
|
|
|
|
2021-03-03 16:39:27 +01:00
|
|
|
/**
|
|
|
|
* ice_rx_offset - Return expected offset into page to access data
|
|
|
|
* @rx_ring: Ring we are requesting offset of
|
|
|
|
*
|
|
|
|
* Returns the offset value for ring into the data buffer.
|
|
|
|
*/
|
2021-08-19 13:59:58 +02:00
|
|
|
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
|
2021-03-03 16:39:27 +01:00
|
|
|
{
|
|
|
|
if (ice_ring_uses_build_skb(rx_ring))
|
|
|
|
return ICE_SKB_PAD;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/**
|
|
|
|
* ice_setup_rx_ctx - Configure a receive ring context
|
|
|
|
* @ring: The Rx ring to configure
|
|
|
|
*
|
|
|
|
* Configure the Rx descriptor ring in RLAN context.
|
|
|
|
*/
|
2021-08-19 13:59:58 +02:00
|
|
|
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
|
2019-10-24 01:11:17 -07:00
|
|
|
{
|
|
|
|
struct ice_vsi *vsi = ring->vsi;
|
|
|
|
u32 rxdid = ICE_RXDID_FLEX_NIC;
|
|
|
|
struct ice_rlan_ctx rlan_ctx;
|
2019-11-04 09:38:56 -08:00
|
|
|
struct ice_hw *hw;
|
2019-10-24 01:11:17 -07:00
|
|
|
u16 pf_q;
|
|
|
|
int err;
|
|
|
|
|
2019-11-04 09:38:56 -08:00
|
|
|
hw = &vsi->back->hw;
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/* what is Rx queue number in global space of 2K Rx queues */
|
|
|
|
pf_q = vsi->rxq_map[ring->q_index];
|
|
|
|
|
|
|
|
/* clear the context structure first */
|
|
|
|
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
|
|
|
|
|
2019-11-04 09:38:56 -08:00
|
|
|
/* Receive Queue Base Address.
|
|
|
|
* Indicates the starting address of the descriptor queue defined in
|
|
|
|
* 128 Byte units.
|
|
|
|
*/
|
2022-11-03 15:30:05 +01:00
|
|
|
rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
rlan_ctx.qlen = ring->count;
|
|
|
|
|
|
|
|
/* Receive Packet Data Buffer Size.
|
|
|
|
* The Packet Data Buffer Size is defined in 128 byte units.
|
|
|
|
*/
|
2023-08-10 16:51:10 -07:00
|
|
|
rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
|
|
|
|
BIT_ULL(ICE_RLAN_CTX_DBUF_S));
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
/* use 32 byte descriptors */
|
|
|
|
rlan_ctx.dsize = 1;
|
|
|
|
|
|
|
|
/* Strip the Ethernet CRC bytes before the packet is posted to host
|
|
|
|
* memory.
|
|
|
|
*/
|
2022-07-27 09:24:05 +02:00
|
|
|
rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
ice: Add hot path support for 802.1Q and 802.1ad VLAN offloads
Currently the driver only supports 802.1Q VLAN insertion and stripping.
However, once Double VLAN Mode (DVM) is fully supported, then both 802.1Q
and 802.1ad VLAN insertion and stripping will be supported. Unfortunately
the VSI context parameters only allow for one VLAN ethertype at a time
for VLAN offloads so only one or the other VLAN ethertype offload can be
supported at once.
To support this, multiple changes are needed.
Rx path changes:
[1] In DVM, the Rx queue context l2tagsel field needs to be cleared so
the outermost tag shows up in the l2tag2_2nd field of the Rx flex
descriptor. In Single VLAN Mode (SVM), the l2tagsel field should remain
1 to support SVM configurations.
[2] Modify the ice_test_staterr() function to take a __le16 instead of
the ice_32b_rx_flex_desc union pointer so this function can be used for
both rx_desc->wb.status_error0 and rx_desc->wb.status_error1.
[3] Add the new inline function ice_get_vlan_tag_from_rx_desc() that
checks if there is a VLAN tag in l2tag1 or l2tag2_2nd.
[4] In ice_receive_skb(), add a check to see if NETIF_F_HW_VLAN_STAG_RX
is enabled in netdev->features. If it is, then this is the VLAN
ethertype that needs to be added to the stripping VLAN tag. Since
ice_fix_features() prevents CTAG_RX and STAG_RX from being enabled
simultaneously, the VLAN ethertype will only ever be 802.1Q or 802.1ad.
Tx path changes:
[1] In DVM, the VLAN tag needs to be placed in the l2tag2 field of the Tx
context descriptor. The new define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN was
added to the list of tx_flags to handle this case.
[2] When the stack requests the VLAN tag to be offloaded on Tx, the
driver needs to set either ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN or
ICE_TX_FLAGS_HW_VLAN, so the tag is inserted in l2tag2 or l2tag1
respectively. To determine which location to use, set a bit in the Tx
ring flags field during ring allocation that can be used to determine
which field to use in the Tx descriptor. In DVM, always use l2tag2,
and in SVM, always use l2tag1.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2021-12-02 08:38:47 -08:00
|
|
|
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
|
|
|
|
* and it needs to remain 1 for non-DVM capable configurations to not
|
|
|
|
* break backward compatibility for VF drivers. Setting this field to 0
|
|
|
|
* will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
|
|
|
|
* field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
|
|
|
|
* be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
|
|
|
|
* check for the tag
|
|
|
|
*/
|
|
|
|
if (ice_is_dvm_ena(hw))
|
|
|
|
if (vsi->type == ICE_VSI_VF &&
|
2022-02-16 13:37:29 -08:00
|
|
|
ice_vf_is_port_vlan_ena(vsi->vf))
|
ice: Add hot path support for 802.1Q and 802.1ad VLAN offloads
Currently the driver only supports 802.1Q VLAN insertion and stripping.
However, once Double VLAN Mode (DVM) is fully supported, then both 802.1Q
and 802.1ad VLAN insertion and stripping will be supported. Unfortunately
the VSI context parameters only allow for one VLAN ethertype at a time
for VLAN offloads so only one or the other VLAN ethertype offload can be
supported at once.
To support this, multiple changes are needed.
Rx path changes:
[1] In DVM, the Rx queue context l2tagsel field needs to be cleared so
the outermost tag shows up in the l2tag2_2nd field of the Rx flex
descriptor. In Single VLAN Mode (SVM), the l2tagsel field should remain
1 to support SVM configurations.
[2] Modify the ice_test_staterr() function to take a __le16 instead of
the ice_32b_rx_flex_desc union pointer so this function can be used for
both rx_desc->wb.status_error0 and rx_desc->wb.status_error1.
[3] Add the new inline function ice_get_vlan_tag_from_rx_desc() that
checks if there is a VLAN tag in l2tag1 or l2tag2_2nd.
[4] In ice_receive_skb(), add a check to see if NETIF_F_HW_VLAN_STAG_RX
is enabled in netdev->features. If it is, then this is the VLAN
ethertype that needs to be added to the stripping VLAN tag. Since
ice_fix_features() prevents CTAG_RX and STAG_RX from being enabled
simultaneously, the VLAN ethertype will only ever be 802.1Q or 802.1ad.
Tx path changes:
[1] In DVM, the VLAN tag needs to be placed in the l2tag2 field of the Tx
context descriptor. The new define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN was
added to the list of tx_flags to handle this case.
[2] When the stack requests the VLAN tag to be offloaded on Tx, the
driver needs to set either ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN or
ICE_TX_FLAGS_HW_VLAN, so the tag is inserted in l2tag2 or l2tag1
respectively. To determine which location to use, set a bit in the Tx
ring flags field during ring allocation that can be used to determine
which field to use in the Tx descriptor. In DVM, always use l2tag2,
and in SVM, always use l2tag1.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2021-12-02 08:38:47 -08:00
|
|
|
rlan_ctx.l2tsel = 1;
|
|
|
|
else
|
|
|
|
rlan_ctx.l2tsel = 0;
|
|
|
|
else
|
|
|
|
rlan_ctx.l2tsel = 1;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
|
|
|
|
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
|
|
|
|
rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
|
|
|
|
|
|
|
|
/* This controls whether VLAN is stripped from inner headers
|
|
|
|
* The VLAN in the inner L2 header is stripped to the receive
|
|
|
|
* descriptor if enabled by this flag.
|
|
|
|
*/
|
|
|
|
rlan_ctx.showiv = 0;
|
|
|
|
|
|
|
|
/* Max packet size for this queue - must not be set to a larger value
|
|
|
|
* than 5 x DBUF
|
|
|
|
*/
|
2020-05-07 17:41:05 -07:00
|
|
|
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
|
2023-07-19 15:24:09 +02:00
|
|
|
ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
/* Rx queue threshold in units of 64 */
|
|
|
|
rlan_ctx.lrxqthresh = 1;
|
|
|
|
|
2024-03-01 12:54:13 +01:00
|
|
|
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
|
|
|
|
* metadata and flags to allow redirecting to PR netdev
|
|
|
|
*/
|
|
|
|
if (ice_is_eswitch_mode_switchdev(vsi->back)) {
|
|
|
|
ring->flags |= ICE_RX_FLAGS_MULTIDEV;
|
|
|
|
rxdid = ICE_RXDID_FLEX_NIC_2;
|
|
|
|
}
|
|
|
|
|
2020-05-15 17:54:59 -07:00
|
|
|
/* Enable Flexible Descriptors in the queue context which
|
|
|
|
* allows this driver to select a specific receive descriptor format
|
|
|
|
* increasing context priority to pick up profile ID; default is 0x01;
|
|
|
|
* setting to 0x03 to ensure profile is programming if prev context is
|
|
|
|
* of same priority
|
|
|
|
*/
|
|
|
|
if (vsi->type != ICE_VSI_VF)
|
ice: enable receive hardware timestamping
Add SIOCGHWTSTAMP and SIOCSHWTSTAMP ioctl handlers to respond to
requests to enable timestamping support. If the request is for enabling
Rx timestamps, set a bit in the Rx descriptors to indicate that receive
timestamps should be reported.
Hardware captures receive timestamps in the PHY which only captures part
of the timer, and reports only 40 bits into the Rx descriptor. The upper
32 bits represent the contents of GLTSYN_TIME_L at the point of packet
reception, while the lower 8 bits represent the upper 8 bits of
GLTSYN_TIME_0.
The networking and PTP stack expect 64 bit timestamps in nanoseconds. To
support this, implement some logic to extend the timestamps by using the
full PHC time.
If the Rx timestamp was captured prior to the PHC time, then the real
timestamp is
PHC - (lower_32_bits(PHC) - timestamp)
If the Rx timestamp was captured after the PHC time, then the real
timestamp is
PHC + (timestamp - lower_32_bits(PHC))
These calculations are correct as long as neither the PHC timestamp nor
the Rx timestamps are more than 2^32-1 nanseconds old. Further, we can
detect when the Rx timestamp is before or after the PHC as long as the
PHC timestamp is no more than 2^31-1 nanoseconds old.
In that case, we calculate the delta between the lower 32 bits of the
PHC and the Rx timestamp. If it's larger than 2^31-1 then the Rx
timestamp must have been captured in the past. If it's smaller, then the
Rx timestamp must have been captured after PHC time.
Add an ice_ptp_extend_32b_ts function that relies on a cached copy of
the PHC time and implements this algorithm to calculate the proper upper
32bits of the Rx timestamps.
Cache the PHC time periodically in all of the Rx rings. This enables
each Rx ring to simply call the extension function with a recent copy of
the PHC time. By ensuring that the PHC time is kept up to date
periodically, we ensure this algorithm doesn't use stale data and
produce incorrect results.
To cache the time, introduce a kworker and a kwork item to periodically
store the Rx time. It might seem like we should use the .do_aux_work
interface of the PTP clock. This doesn't work because all PFs must cache
this time, but only one PF owns the PTP clock device.
Thus, the ice driver will manage its own kthread instead of relying on
the PTP do_aux_work handler.
With this change, the driver can now report Rx timestamps on all
incoming packets.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2021-06-09 09:39:52 -07:00
|
|
|
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
|
2020-05-15 17:54:59 -07:00
|
|
|
else
|
ice: enable receive hardware timestamping
Add SIOCGHWTSTAMP and SIOCSHWTSTAMP ioctl handlers to respond to
requests to enable timestamping support. If the request is for enabling
Rx timestamps, set a bit in the Rx descriptors to indicate that receive
timestamps should be reported.
Hardware captures receive timestamps in the PHY which only captures part
of the timer, and reports only 40 bits into the Rx descriptor. The upper
32 bits represent the contents of GLTSYN_TIME_L at the point of packet
reception, while the lower 8 bits represent the upper 8 bits of
GLTSYN_TIME_0.
The networking and PTP stack expect 64 bit timestamps in nanoseconds. To
support this, implement some logic to extend the timestamps by using the
full PHC time.
If the Rx timestamp was captured prior to the PHC time, then the real
timestamp is
PHC - (lower_32_bits(PHC) - timestamp)
If the Rx timestamp was captured after the PHC time, then the real
timestamp is
PHC + (timestamp - lower_32_bits(PHC))
These calculations are correct as long as neither the PHC timestamp nor
the Rx timestamps are more than 2^32-1 nanseconds old. Further, we can
detect when the Rx timestamp is before or after the PHC as long as the
PHC timestamp is no more than 2^31-1 nanoseconds old.
In that case, we calculate the delta between the lower 32 bits of the
PHC and the Rx timestamp. If it's larger than 2^31-1 then the Rx
timestamp must have been captured in the past. If it's smaller, then the
Rx timestamp must have been captured after PHC time.
Add an ice_ptp_extend_32b_ts function that relies on a cached copy of
the PHC time and implements this algorithm to calculate the proper upper
32bits of the Rx timestamps.
Cache the PHC time periodically in all of the Rx rings. This enables
each Rx ring to simply call the extension function with a recent copy of
the PHC time. By ensuring that the PHC time is kept up to date
periodically, we ensure this algorithm doesn't use stale data and
produce incorrect results.
To cache the time, introduce a kworker and a kwork item to periodically
store the Rx time. It might seem like we should use the .do_aux_work
interface of the PTP clock. This doesn't work because all PFs must cache
this time, but only one PF owns the PTP clock device.
Thus, the ice driver will manage its own kthread instead of relying on
the PTP do_aux_work handler.
With this change, the driver can now report Rx timestamps on all
incoming packets.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
2021-06-09 09:39:52 -07:00
|
|
|
ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
|
|
|
|
false);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
/* Absolute queue number out of 2K needs to be passed */
|
|
|
|
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
|
|
|
|
if (err) {
|
2020-11-20 16:39:32 -08:00
|
|
|
dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
|
2019-10-24 01:11:17 -07:00
|
|
|
pf_q, err);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vsi->type == ICE_VSI_VF)
|
|
|
|
return 0;
|
|
|
|
|
2019-10-24 01:11:23 -07:00
|
|
|
/* configure Rx buffer alignment */
|
|
|
|
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
|
|
|
|
ice_clear_ring_build_skb_ena(ring);
|
|
|
|
else
|
|
|
|
ice_set_ring_build_skb_ena(ring);
|
|
|
|
|
2021-03-03 16:39:27 +01:00
|
|
|
ring->rx_offset = ice_rx_offset(ring);
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/* init queue specific tail register */
|
|
|
|
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
|
|
|
|
writel(0, ring->tail);
|
2019-11-04 09:38:56 -08:00
|
|
|
|
2020-11-20 16:39:32 -08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-12-05 22:08:37 +01:00
|
|
|
static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
|
|
|
|
{
|
|
|
|
void *ctx_ptr = &ring->pkt_ctx;
|
|
|
|
struct xsk_cb_desc desc = {};
|
|
|
|
|
|
|
|
XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
|
|
|
|
desc.src = &ctx_ptr;
|
|
|
|
desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
|
|
|
|
sizeof(struct xdp_buff);
|
|
|
|
desc.bytes = sizeof(ctx_ptr);
|
|
|
|
xsk_pool_fill_cb(ring->xsk_pool, &desc);
|
|
|
|
}
|
|
|
|
|
2020-11-20 16:39:32 -08:00
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_rxq - Configure an Rx queue
|
|
|
|
* @ring: the ring being configured
|
|
|
|
*
|
|
|
|
* Return 0 on success and a negative value on error.
|
|
|
|
*/
|
2024-01-23 12:58:45 +01:00
|
|
|
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
|
2020-11-20 16:39:32 -08:00
|
|
|
{
|
|
|
|
struct device *dev = ice_pf_to_dev(ring->vsi->back);
|
ice: Add support for XDP multi-buffer on Rx side
Ice driver needs to be a bit reworked on Rx data path in order to
support multi-buffer XDP. For skb path, it currently works in a way that
Rx ring carries pointer to skb so if driver didn't manage to combine
fragmented frame at current NAPI instance, it can restore the state on
next instance and keep looking for last fragment (so descriptor with EOP
bit set). What needs to be achieved is that xdp_buff needs to be
combined in such way (linear + frags part) in the first place. Then skb
will be ready to go in case of XDP_PASS or BPF program being not present
on interface. If BPF program is there, it would work on multi-buffer
XDP. At this point xdp_buff resides directly on Rx ring, so given the
fact that skb will be built straight from xdp_buff, there will be no
further need to carry skb on Rx ring.
Besides removing skb pointer from Rx ring, lots of members have been
moved around within ice_rx_ring. First and foremost reason was to place
rx_buf with xdp_buff on the same cacheline. This means that once we
touch rx_buf (which is a preceding step before touching xdp_buff),
xdp_buff will already be hot in cache. Second thing was that xdp_rxq is
used rather rarely and it occupies a separate cacheline, so maybe it is
better to have it at the end of ice_rx_ring.
Other change that affects ice_rx_ring is the introduction of
ice_rx_ring::first_desc. Its purpose is twofold - first is to propagate
rx_buf->act to all the parts of current xdp_buff after running XDP
program, so that ice_put_rx_buf() that got moved out of the main Rx
processing loop will be able to tak an appriopriate action on each
buffer. Second is for ice_construct_skb().
ice_construct_skb() has a copybreak mechanism which had an explicit
impact on xdp_buff->skb conversion in the new approach when legacy Rx
flag is toggled. It works in a way that linear part is 256 bytes long,
if frame is bigger than that, remaining bytes are going as a frag to
skb_shared_info.
This means while memcpying frags from xdp_buff to newly allocated skb,
care needs to be taken when picking the destination frag array entry.
Upon the time ice_construct_skb() is called, when dealing with
fragmented frame, current rx_buf points to the *last* fragment, but
copybreak needs to be done against the first one. That's where
ice_rx_ring::first_desc helps.
When frame building spans across NAPI polls (DD bit is not set on
current descriptor and xdp->data is not NULL) with current Rx buffer
handling state there might be some problems.
Since calls to ice_put_rx_buf() were pulled out of the main Rx
processing loop and were scoped from cached_ntc to current ntc, remember
that now mentioned function relies on rx_buf->act, which is set within
ice_run_xdp(). ice_run_xdp() is called when EOP bit was found, so
currently we could put Rx buffer with rx_buf->act being *uninitialized*.
To address this, change scoping to rely on first_desc on both boundaries
instead.
This also implies that cleaned_count which is used as an input to
ice_alloc_rx_buffers() and tells how many new buffers should be refilled
has to be adjusted. If it stayed as is, what could happen is a case
where ntc would go over ntu.
Therefore, remove cleaned_count altogether and use against allocing
routine newly introduced ICE_RX_DESC_UNUSED() macro which is an
equivalent of ICE_DESC_UNUSED() dedicated for Rx side and based on
struct ice_rx_ring::first_desc instead of next_to_clean.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-11-maciej.fijalkowski@intel.com
2023-01-31 21:45:03 +01:00
|
|
|
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
|
2020-11-20 16:39:32 -08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
ring->rx_buf_len = ring->vsi->rx_buf_len;
|
|
|
|
|
|
|
|
if (ring->vsi->type == ICE_VSI_PF) {
|
2024-01-24 20:15:59 +01:00
|
|
|
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
|
|
|
|
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
|
|
|
ring->q_index,
|
|
|
|
ring->q_vector->napi.napi_id,
|
|
|
|
ring->rx_buf_len);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
2020-11-20 16:39:32 -08:00
|
|
|
|
|
|
|
ring->xsk_pool = ice_xsk_pool(ring);
|
|
|
|
if (ring->xsk_pool) {
|
2024-01-24 20:15:59 +01:00
|
|
|
xdp_rxq_info_unreg(&ring->xdp_rxq);
|
2020-11-20 16:39:32 -08:00
|
|
|
|
|
|
|
ring->rx_buf_len =
|
|
|
|
xsk_pool_get_rx_frame_size(ring->xsk_pool);
|
2024-01-24 20:15:59 +01:00
|
|
|
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
|
|
|
ring->q_index,
|
|
|
|
ring->q_vector->napi.napi_id,
|
|
|
|
ring->rx_buf_len);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2020-11-20 16:39:32 -08:00
|
|
|
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
|
|
|
MEM_TYPE_XSK_BUFF_POOL,
|
|
|
|
NULL);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
|
2023-12-05 22:08:37 +01:00
|
|
|
ice_xsk_pool_fill_cb(ring);
|
2020-11-20 16:39:32 -08:00
|
|
|
|
|
|
|
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
|
|
|
|
ring->q_index);
|
|
|
|
} else {
|
2024-01-24 20:15:59 +01:00
|
|
|
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
|
|
|
|
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
|
|
|
ring->q_index,
|
|
|
|
ring->q_vector->napi.napi_id,
|
|
|
|
ring->rx_buf_len);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
2020-11-20 16:39:32 -08:00
|
|
|
|
|
|
|
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
|
|
|
|
MEM_TYPE_PAGE_SHARED,
|
|
|
|
NULL);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-31 21:44:55 +01:00
|
|
|
xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
|
ice: Add support for XDP multi-buffer on Rx side
Ice driver needs to be a bit reworked on Rx data path in order to
support multi-buffer XDP. For skb path, it currently works in a way that
Rx ring carries pointer to skb so if driver didn't manage to combine
fragmented frame at current NAPI instance, it can restore the state on
next instance and keep looking for last fragment (so descriptor with EOP
bit set). What needs to be achieved is that xdp_buff needs to be
combined in such way (linear + frags part) in the first place. Then skb
will be ready to go in case of XDP_PASS or BPF program being not present
on interface. If BPF program is there, it would work on multi-buffer
XDP. At this point xdp_buff resides directly on Rx ring, so given the
fact that skb will be built straight from xdp_buff, there will be no
further need to carry skb on Rx ring.
Besides removing skb pointer from Rx ring, lots of members have been
moved around within ice_rx_ring. First and foremost reason was to place
rx_buf with xdp_buff on the same cacheline. This means that once we
touch rx_buf (which is a preceding step before touching xdp_buff),
xdp_buff will already be hot in cache. Second thing was that xdp_rxq is
used rather rarely and it occupies a separate cacheline, so maybe it is
better to have it at the end of ice_rx_ring.
Other change that affects ice_rx_ring is the introduction of
ice_rx_ring::first_desc. Its purpose is twofold - first is to propagate
rx_buf->act to all the parts of current xdp_buff after running XDP
program, so that ice_put_rx_buf() that got moved out of the main Rx
processing loop will be able to tak an appriopriate action on each
buffer. Second is for ice_construct_skb().
ice_construct_skb() has a copybreak mechanism which had an explicit
impact on xdp_buff->skb conversion in the new approach when legacy Rx
flag is toggled. It works in a way that linear part is 256 bytes long,
if frame is bigger than that, remaining bytes are going as a frag to
skb_shared_info.
This means while memcpying frags from xdp_buff to newly allocated skb,
care needs to be taken when picking the destination frag array entry.
Upon the time ice_construct_skb() is called, when dealing with
fragmented frame, current rx_buf points to the *last* fragment, but
copybreak needs to be done against the first one. That's where
ice_rx_ring::first_desc helps.
When frame building spans across NAPI polls (DD bit is not set on
current descriptor and xdp->data is not NULL) with current Rx buffer
handling state there might be some problems.
Since calls to ice_put_rx_buf() were pulled out of the main Rx
processing loop and were scoped from cached_ntc to current ntc, remember
that now mentioned function relies on rx_buf->act, which is set within
ice_run_xdp(). ice_run_xdp() is called when EOP bit was found, so
currently we could put Rx buffer with rx_buf->act being *uninitialized*.
To address this, change scoping to rely on first_desc on both boundaries
instead.
This also implies that cleaned_count which is used as an input to
ice_alloc_rx_buffers() and tells how many new buffers should be refilled
has to be adjusted. If it stayed as is, what could happen is a case
where ntc would go over ntu.
Therefore, remove cleaned_count altogether and use against allocing
routine newly introduced ICE_RX_DESC_UNUSED() macro which is an
equivalent of ICE_DESC_UNUSED() dedicated for Rx side and based on
struct ice_rx_ring::first_desc instead of next_to_clean.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-11-maciej.fijalkowski@intel.com
2023-01-31 21:45:03 +01:00
|
|
|
ring->xdp.data = NULL;
|
2023-12-05 22:08:34 +01:00
|
|
|
ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
|
2020-11-20 16:39:32 -08:00
|
|
|
err = ice_setup_rx_ctx(ring);
|
|
|
|
if (err) {
|
|
|
|
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
|
|
|
|
ring->q_index, err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-08-28 10:26:15 +02:00
|
|
|
if (ring->xsk_pool) {
|
2021-02-05 10:09:04 +01:00
|
|
|
bool ok;
|
|
|
|
|
2020-08-28 10:26:16 +02:00
|
|
|
if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
|
2020-08-28 10:26:15 +02:00
|
|
|
dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
|
2020-05-15 17:42:20 -07:00
|
|
|
num_bufs, ring->q_index);
|
|
|
|
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-02-05 10:09:04 +01:00
|
|
|
ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
|
2020-11-20 16:39:32 -08:00
|
|
|
if (!ok) {
|
|
|
|
u16 pf_q = ring->vsi->rxq_map[ring->q_index];
|
|
|
|
|
2020-08-28 10:26:15 +02:00
|
|
|
dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
|
2020-05-15 17:42:20 -07:00
|
|
|
ring->q_index, pf_q);
|
2020-11-20 16:39:32 -08:00
|
|
|
}
|
|
|
|
|
2020-05-15 17:42:20 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ice_alloc_rx_bufs(ring, num_bufs);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-01-23 12:58:45 +01:00
|
|
|
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
|
|
|
|
{
|
|
|
|
if (q_idx >= vsi->num_rxq)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
|
|
|
|
* @vsi: VSI
|
|
|
|
*/
|
|
|
|
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
|
|
|
|
vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
|
|
|
|
vsi->rx_buf_len = ICE_RXBUF_1664;
|
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
|
|
|
|
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
|
|
|
|
vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
|
|
|
vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
|
|
|
|
vsi->rx_buf_len = ICE_RXBUF_3072;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
*
|
|
|
|
* Return 0 on success and a negative value on error
|
|
|
|
* Configure the Rx VSI for operation.
|
|
|
|
*/
|
|
|
|
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
u16 i;
|
|
|
|
|
|
|
|
if (vsi->type == ICE_VSI_VF)
|
|
|
|
goto setup_rings;
|
|
|
|
|
|
|
|
ice_vsi_cfg_frame_size(vsi);
|
|
|
|
setup_rings:
|
|
|
|
/* set up individual rings */
|
|
|
|
ice_for_each_rxq(vsi, i) {
|
|
|
|
int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/**
|
|
|
|
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
|
|
|
|
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
|
|
|
|
*
|
|
|
|
* This function first tries to find contiguous space. If it is not successful,
|
|
|
|
* it tries with the scatter approach.
|
|
|
|
*
|
|
|
|
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
|
|
|
|
*/
|
|
|
|
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
ret = __ice_vsi_get_qs_contig(qs_cfg);
|
|
|
|
if (ret) {
|
|
|
|
/* contig failed, so try with scatter approach */
|
|
|
|
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
|
2020-05-07 17:41:05 -07:00
|
|
|
qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
|
2019-10-24 01:11:17 -07:00
|
|
|
qs_cfg->scatter_count);
|
|
|
|
ret = __ice_vsi_get_qs_sc(qs_cfg);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2020-01-22 07:21:29 -08:00
|
|
|
* ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
|
2019-10-24 01:11:17 -07:00
|
|
|
* @vsi: the VSI being configured
|
2020-01-22 07:21:29 -08:00
|
|
|
* @ena: start or stop the Rx ring
|
|
|
|
* @rxq_idx: 0-based Rx queue index for the VSI passed in
|
|
|
|
* @wait: wait or don't wait for configuration to finish in hardware
|
|
|
|
*
|
|
|
|
* Return 0 on success and negative on error.
|
2019-10-24 01:11:17 -07:00
|
|
|
*/
|
2020-01-22 07:21:29 -08:00
|
|
|
int
|
|
|
|
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
|
2019-10-24 01:11:17 -07:00
|
|
|
{
|
|
|
|
int pf_q = vsi->rxq_map[rxq_idx];
|
|
|
|
struct ice_pf *pf = vsi->back;
|
|
|
|
struct ice_hw *hw = &pf->hw;
|
|
|
|
u32 rx_reg;
|
|
|
|
|
|
|
|
rx_reg = rd32(hw, QRX_CTRL(pf_q));
|
|
|
|
|
|
|
|
/* Skip if the queue is already in the requested state */
|
|
|
|
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* turn on/off the queue */
|
|
|
|
if (ena)
|
|
|
|
rx_reg |= QRX_CTRL_QENA_REQ_M;
|
|
|
|
else
|
|
|
|
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
|
|
|
|
wr32(hw, QRX_CTRL(pf_q), rx_reg);
|
|
|
|
|
2020-01-22 07:21:29 -08:00
|
|
|
if (!wait)
|
|
|
|
return 0;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2020-01-22 07:21:29 -08:00
|
|
|
ice_flush(hw);
|
|
|
|
return ice_pf_rxq_wait(pf, pf_q, ena);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
* @ena: true/false to verify Rx ring has been enabled/disabled respectively
|
|
|
|
* @rxq_idx: 0-based Rx queue index for the VSI passed in
|
|
|
|
*
|
|
|
|
* This routine will wait for the given Rx queue of the VSI to reach the
|
|
|
|
* enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
|
|
|
|
* the requested state after multiple retries; else will return 0 in case of
|
|
|
|
* success.
|
|
|
|
*/
|
|
|
|
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
|
|
|
|
{
|
|
|
|
int pf_q = vsi->rxq_map[rxq_idx];
|
|
|
|
struct ice_pf *pf = vsi->back;
|
|
|
|
|
|
|
|
return ice_pf_rxq_wait(pf, pf_q, ena);
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
*
|
|
|
|
* We allocate one q_vector per queue interrupt. If allocation fails we
|
|
|
|
* return -ENOMEM.
|
|
|
|
*/
|
|
|
|
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
|
|
|
|
{
|
2020-02-06 01:20:11 -08:00
|
|
|
struct device *dev = ice_pf_to_dev(vsi->back);
|
2020-05-07 17:41:05 -07:00
|
|
|
u16 v_idx;
|
|
|
|
int err;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
if (vsi->q_vectors[0]) {
|
2019-11-08 06:23:26 -08:00
|
|
|
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
|
2019-10-24 01:11:17 -07:00
|
|
|
return -EEXIST;
|
|
|
|
}
|
|
|
|
|
2020-02-06 01:20:11 -08:00
|
|
|
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
|
2019-10-24 01:11:17 -07:00
|
|
|
err = ice_vsi_alloc_q_vector(vsi, v_idx);
|
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
while (v_idx--)
|
|
|
|
ice_free_q_vector(vsi, v_idx);
|
|
|
|
|
2019-11-08 06:23:26 -08:00
|
|
|
dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
|
2019-10-24 01:11:17 -07:00
|
|
|
vsi->num_q_vectors, vsi->vsi_num, err);
|
|
|
|
vsi->num_q_vectors = 0;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
*
|
|
|
|
* This function maps descriptor rings to the queue-specific vectors allotted
|
|
|
|
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
|
|
|
|
* and Rx rings to the vector as "efficiently" as possible.
|
|
|
|
*/
|
|
|
|
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
int q_vectors = vsi->num_q_vectors;
|
2020-05-07 17:41:05 -07:00
|
|
|
u16 tx_rings_rem, rx_rings_rem;
|
2019-10-24 01:11:17 -07:00
|
|
|
int v_id;
|
|
|
|
|
|
|
|
/* initially assigning remaining rings count to VSIs num queue value */
|
|
|
|
tx_rings_rem = vsi->num_txq;
|
|
|
|
rx_rings_rem = vsi->num_rxq;
|
|
|
|
|
|
|
|
for (v_id = 0; v_id < q_vectors; v_id++) {
|
|
|
|
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
|
2020-05-07 17:41:05 -07:00
|
|
|
u8 tx_rings_per_v, rx_rings_per_v;
|
|
|
|
u16 q_id, q_base;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
/* Tx rings mapping to vector */
|
2020-05-07 17:41:05 -07:00
|
|
|
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
|
|
|
|
q_vectors - v_id);
|
2019-10-24 01:11:17 -07:00
|
|
|
q_vector->num_ring_tx = tx_rings_per_v;
|
2021-08-19 13:59:58 +02:00
|
|
|
q_vector->tx.tx_ring = NULL;
|
2019-10-24 01:11:17 -07:00
|
|
|
q_vector->tx.itr_idx = ICE_TX_ITR;
|
|
|
|
q_base = vsi->num_txq - tx_rings_rem;
|
|
|
|
|
|
|
|
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
|
2021-08-19 13:59:58 +02:00
|
|
|
struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
tx_ring->q_vector = q_vector;
|
2021-08-19 13:59:58 +02:00
|
|
|
tx_ring->next = q_vector->tx.tx_ring;
|
|
|
|
q_vector->tx.tx_ring = tx_ring;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
tx_rings_rem -= tx_rings_per_v;
|
|
|
|
|
|
|
|
/* Rx rings mapping to vector */
|
2020-05-07 17:41:05 -07:00
|
|
|
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
|
|
|
|
q_vectors - v_id);
|
2019-10-24 01:11:17 -07:00
|
|
|
q_vector->num_ring_rx = rx_rings_per_v;
|
2021-08-19 13:59:58 +02:00
|
|
|
q_vector->rx.rx_ring = NULL;
|
2019-10-24 01:11:17 -07:00
|
|
|
q_vector->rx.itr_idx = ICE_RX_ITR;
|
|
|
|
q_base = vsi->num_rxq - rx_rings_rem;
|
|
|
|
|
|
|
|
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
|
2021-08-19 13:59:58 +02:00
|
|
|
struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
rx_ring->q_vector = q_vector;
|
2021-08-19 13:59:58 +02:00
|
|
|
rx_ring->next = q_vector->rx.rx_ring;
|
|
|
|
q_vector->rx.rx_ring = rx_ring;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
rx_rings_rem -= rx_rings_per_v;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
|
|
|
|
* @vsi: the VSI having memory freed
|
|
|
|
*/
|
|
|
|
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
int v_idx;
|
|
|
|
|
|
|
|
ice_for_each_q_vector(vsi, v_idx)
|
|
|
|
ice_free_q_vector(vsi, v_idx);
|
2023-07-06 08:25:51 +02:00
|
|
|
|
|
|
|
vsi->num_q_vectors = 0;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_txq - Configure single Tx queue
|
|
|
|
* @vsi: the VSI that queue belongs to
|
|
|
|
* @ring: Tx ring to be configured
|
|
|
|
* @qg_buf: queue group buffer
|
|
|
|
*/
|
2024-01-23 12:58:46 +01:00
|
|
|
static int
|
2021-08-19 13:59:58 +02:00
|
|
|
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
|
2019-10-24 01:11:18 -07:00
|
|
|
struct ice_aqc_add_tx_qgrp *qg_buf)
|
2019-10-24 01:11:17 -07:00
|
|
|
{
|
2020-06-29 17:27:46 -07:00
|
|
|
u8 buf_len = struct_size(qg_buf, txqs, 1);
|
2019-10-24 01:11:17 -07:00
|
|
|
struct ice_tlan_ctx tlan_ctx = { 0 };
|
|
|
|
struct ice_aqc_add_txqs_perq *txq;
|
2021-10-15 16:35:15 -07:00
|
|
|
struct ice_channel *ch = ring->ch;
|
2019-10-24 01:11:17 -07:00
|
|
|
struct ice_pf *pf = vsi->back;
|
2020-05-15 17:42:18 -07:00
|
|
|
struct ice_hw *hw = &pf->hw;
|
2021-10-07 15:56:57 -07:00
|
|
|
int status;
|
2019-10-24 01:11:17 -07:00
|
|
|
u16 pf_q;
|
2019-10-24 01:11:18 -07:00
|
|
|
u8 tc;
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2021-03-02 10:12:02 -08:00
|
|
|
/* Configure XPS */
|
|
|
|
ice_cfg_xps_tx_ring(ring);
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
pf_q = ring->reg_idx;
|
|
|
|
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
|
|
|
|
/* copy context contents into the qg_buf */
|
|
|
|
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
|
2020-05-15 17:42:18 -07:00
|
|
|
ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
|
2019-10-24 01:11:17 -07:00
|
|
|
ice_tlan_ctx_info);
|
|
|
|
|
|
|
|
/* init queue specific tail reg. It is referred as
|
|
|
|
* transmit comm scheduler queue doorbell.
|
|
|
|
*/
|
2020-05-15 17:42:18 -07:00
|
|
|
ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2019-10-24 01:11:18 -07:00
|
|
|
if (IS_ENABLED(CONFIG_DCB))
|
|
|
|
tc = ring->dcb_tc;
|
|
|
|
else
|
|
|
|
tc = 0;
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/* Add unique software queue handle of the Tx queue per
|
|
|
|
* TC into the VSI Tx ring
|
|
|
|
*/
|
2024-03-01 12:54:11 +01:00
|
|
|
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2021-10-15 16:35:15 -07:00
|
|
|
if (ch)
|
|
|
|
status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
|
|
|
|
ring->q_handle, 1, qg_buf, buf_len,
|
|
|
|
NULL);
|
|
|
|
else
|
|
|
|
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
|
|
|
|
ring->q_handle, 1, qg_buf, buf_len,
|
|
|
|
NULL);
|
2019-10-24 01:11:17 -07:00
|
|
|
if (status) {
|
2021-10-07 15:56:02 -07:00
|
|
|
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
|
|
|
|
status);
|
2021-10-07 16:01:58 -07:00
|
|
|
return status;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Add Tx Queue TEID into the VSI Tx ring from the
|
|
|
|
* response. This will complete configuring and
|
|
|
|
* enabling the queue.
|
|
|
|
*/
|
|
|
|
txq = &qg_buf->txqs[0];
|
|
|
|
if (pf_q == le16_to_cpu(txq->txq_id))
|
|
|
|
ring->txq_teid = le32_to_cpu(txq->q_teid);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-01-23 12:58:46 +01:00
|
|
|
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
|
|
|
|
u16 q_idx)
|
|
|
|
{
|
2024-03-06 15:51:36 -08:00
|
|
|
DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
|
2024-01-23 12:58:46 +01:00
|
|
|
|
|
|
|
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
qg_buf->num_txqs = 1;
|
|
|
|
|
|
|
|
return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_txqs - Configure the VSI for Tx
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
* @rings: Tx ring array to be configured
|
|
|
|
* @count: number of Tx ring array elements
|
|
|
|
*
|
|
|
|
* Return 0 on success and a negative value on error
|
|
|
|
* Configure the Tx VSI for operation.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
|
|
|
|
{
|
2024-03-06 15:51:36 -08:00
|
|
|
DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
|
2024-01-23 12:58:46 +01:00
|
|
|
int err = 0;
|
|
|
|
u16 q_idx;
|
|
|
|
|
|
|
|
qg_buf->num_txqs = 1;
|
|
|
|
|
|
|
|
for (q_idx = 0; q_idx < count; q_idx++) {
|
|
|
|
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
|
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
*
|
|
|
|
* Return 0 on success and a negative value on error
|
|
|
|
* Configure the Tx VSI for operation.
|
|
|
|
*/
|
|
|
|
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
*
|
|
|
|
* Return 0 on success and a negative value on error
|
|
|
|
* Configure the Tx queues dedicated for XDP in given VSI for operation.
|
|
|
|
*/
|
|
|
|
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ice_for_each_rxq(vsi, i)
|
|
|
|
ice_tx_xsk_pool(vsi, i);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-24 01:11:17 -07:00
|
|
|
/**
|
|
|
|
* ice_cfg_itr - configure the initial interrupt throttle values
|
|
|
|
* @hw: pointer to the HW structure
|
|
|
|
* @q_vector: interrupt vector that's being configured
|
|
|
|
*
|
|
|
|
* Configure interrupt throttling values for the ring containers that are
|
|
|
|
* associated with the interrupt vector passed in.
|
|
|
|
*/
|
|
|
|
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
|
|
|
|
{
|
|
|
|
ice_cfg_itr_gran(hw);
|
|
|
|
|
2021-03-31 14:16:56 -07:00
|
|
|
if (q_vector->num_ring_rx)
|
|
|
|
ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2021-03-31 14:16:56 -07:00
|
|
|
if (q_vector->num_ring_tx)
|
|
|
|
ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
2021-03-31 14:16:56 -07:00
|
|
|
ice_write_intrl(q_vector, q_vector->intrl);
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_txq_interrupt - configure interrupt on Tx queue
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
* @txq: Tx queue being mapped to MSI-X vector
|
|
|
|
* @msix_idx: MSI-X vector index within the function
|
|
|
|
* @itr_idx: ITR index of the interrupt cause
|
|
|
|
*
|
|
|
|
* Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
|
|
|
|
* within the function space.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
|
|
|
|
{
|
|
|
|
struct ice_pf *pf = vsi->back;
|
|
|
|
struct ice_hw *hw = &pf->hw;
|
|
|
|
u32 val;
|
|
|
|
|
2023-12-05 17:01:05 -08:00
|
|
|
itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
|
2023-12-05 17:01:05 -08:00
|
|
|
FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
|
2019-11-04 09:38:56 -08:00
|
|
|
if (ice_is_xdp_ena_vsi(vsi)) {
|
|
|
|
u32 xdp_txq = txq + vsi->num_xdp_txq;
|
|
|
|
|
|
|
|
wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
|
|
|
|
val);
|
|
|
|
}
|
|
|
|
ice_flush(hw);
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_cfg_rxq_interrupt - configure interrupt on Rx queue
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
* @rxq: Rx queue being mapped to MSI-X vector
|
|
|
|
* @msix_idx: MSI-X vector index within the function
|
|
|
|
* @itr_idx: ITR index of the interrupt cause
|
|
|
|
*
|
|
|
|
* Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
|
|
|
|
* within the function space.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
|
|
|
|
{
|
|
|
|
struct ice_pf *pf = vsi->back;
|
|
|
|
struct ice_hw *hw = &pf->hw;
|
|
|
|
u32 val;
|
|
|
|
|
2023-12-05 17:01:05 -08:00
|
|
|
itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
|
2023-12-05 17:01:05 -08:00
|
|
|
FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
|
2019-10-24 01:11:17 -07:00
|
|
|
|
|
|
|
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
|
|
|
|
|
|
|
|
ice_flush(hw);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_trigger_sw_intr - trigger a software interrupt
|
|
|
|
* @hw: pointer to the HW structure
|
|
|
|
* @q_vector: interrupt vector to trigger the software interrupt for
|
|
|
|
*/
|
2023-11-29 02:36:11 -05:00
|
|
|
void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
|
2019-10-24 01:11:17 -07:00
|
|
|
{
|
|
|
|
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
|
|
|
|
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
|
|
|
|
GLINT_DYN_CTL_SWINT_TRIG_M |
|
|
|
|
GLINT_DYN_CTL_INTENA_M);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_vsi_stop_tx_ring - Disable single Tx ring
|
|
|
|
* @vsi: the VSI being configured
|
|
|
|
* @rst_src: reset source
|
|
|
|
* @rel_vmvf_num: Relative ID of VF/VM
|
|
|
|
* @ring: Tx ring to be stopped
|
|
|
|
* @txq_meta: Meta data of Tx ring to be stopped
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
|
2021-08-19 13:59:58 +02:00
|
|
|
u16 rel_vmvf_num, struct ice_tx_ring *ring,
|
2019-10-24 01:11:17 -07:00
|
|
|
struct ice_txq_meta *txq_meta)
|
|
|
|
{
|
|
|
|
struct ice_pf *pf = vsi->back;
|
|
|
|
struct ice_q_vector *q_vector;
|
|
|
|
struct ice_hw *hw = &pf->hw;
|
2021-10-07 15:56:57 -07:00
|
|
|
int status;
|
2019-10-24 01:11:17 -07:00
|
|
|
u32 val;
|
|
|
|
|
|
|
|
/* clear cause_ena bit for disabled queues */
|
|
|
|
val = rd32(hw, QINT_TQCTL(ring->reg_idx));
|
|
|
|
val &= ~QINT_TQCTL_CAUSE_ENA_M;
|
|
|
|
wr32(hw, QINT_TQCTL(ring->reg_idx), val);
|
|
|
|
|
|
|
|
/* software is expected to wait for 100 ns */
|
|
|
|
ndelay(100);
|
|
|
|
|
|
|
|
/* trigger a software interrupt for the vector
|
|
|
|
* associated to the queue to schedule NAPI handler
|
|
|
|
*/
|
|
|
|
q_vector = ring->q_vector;
|
2022-10-10 10:22:22 -04:00
|
|
|
if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
|
2019-10-24 01:11:17 -07:00
|
|
|
ice_trigger_sw_intr(hw, q_vector);
|
|
|
|
|
|
|
|
status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
|
|
|
|
txq_meta->tc, 1, &txq_meta->q_handle,
|
|
|
|
&txq_meta->q_id, &txq_meta->q_teid, rst_src,
|
|
|
|
rel_vmvf_num, NULL);
|
|
|
|
|
|
|
|
/* if the disable queue command was exercised during an
|
2021-10-07 15:58:01 -07:00
|
|
|
* active reset flow, -EBUSY is returned.
|
2019-10-24 01:11:17 -07:00
|
|
|
* This is not an error as the reset operation disables
|
|
|
|
* queues at the hardware level anyway.
|
|
|
|
*/
|
2021-10-07 15:58:01 -07:00
|
|
|
if (status == -EBUSY) {
|
2020-02-06 01:20:10 -08:00
|
|
|
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
|
2021-10-07 15:58:01 -07:00
|
|
|
} else if (status == -ENOENT) {
|
2020-02-06 01:20:10 -08:00
|
|
|
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
|
2019-10-24 01:11:17 -07:00
|
|
|
} else if (status) {
|
2021-10-07 15:56:02 -07:00
|
|
|
dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
|
|
|
|
status);
|
2021-10-07 16:01:58 -07:00
|
|
|
return status;
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ice_fill_txq_meta - Prepare the Tx queue's meta data
|
|
|
|
* @vsi: VSI that ring belongs to
|
|
|
|
* @ring: ring that txq_meta will be based on
|
|
|
|
* @txq_meta: a helper struct that wraps Tx queue's information
|
|
|
|
*
|
|
|
|
* Set up a helper struct that will contain all the necessary fields that
|
|
|
|
* are needed for stopping Tx queue
|
|
|
|
*/
|
|
|
|
void
|
2023-11-29 02:36:11 -05:00
|
|
|
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
|
2019-10-24 01:11:17 -07:00
|
|
|
struct ice_txq_meta *txq_meta)
|
|
|
|
{
|
2021-10-15 16:35:15 -07:00
|
|
|
struct ice_channel *ch = ring->ch;
|
2019-10-24 01:11:17 -07:00
|
|
|
u8 tc;
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_DCB))
|
|
|
|
tc = ring->dcb_tc;
|
|
|
|
else
|
|
|
|
tc = 0;
|
|
|
|
|
|
|
|
txq_meta->q_id = ring->reg_idx;
|
|
|
|
txq_meta->q_teid = ring->txq_teid;
|
|
|
|
txq_meta->q_handle = ring->q_handle;
|
2021-10-15 16:35:15 -07:00
|
|
|
if (ch) {
|
|
|
|
txq_meta->vsi_idx = ch->ch_vsi->idx;
|
|
|
|
txq_meta->tc = 0;
|
|
|
|
} else {
|
|
|
|
txq_meta->vsi_idx = vsi->idx;
|
|
|
|
txq_meta->tc = tc;
|
|
|
|
}
|
2019-10-24 01:11:17 -07:00
|
|
|
}
|