/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_DQO_H_
#define _GVE_DQO_H_

#include "gve_adminq.h"

#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
#define GVE_ITR_CLEAR_PBA_BIT_DQO BIT(1)
#define GVE_ITR_NO_UPDATE_DQO (3 << 3)

#define GVE_ITR_INTERVAL_DQO_SHIFT 5
#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
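
/* Layout of the DQO IRQ doorbell value, as implied by the definitions above
 * (a sketch pieced together from this header, not from hardware docs):
 *
 *	bit 0     - enable the interrupt (GVE_ITR_ENABLE_BIT_DQO)
 *	bit 1     - clear the pending bit array (GVE_ITR_CLEAR_PBA_BIT_DQO)
 *	bits 4:3  - both set to leave the current ITR interval unchanged
 *		    (GVE_ITR_NO_UPDATE_DQO)
 *	bits 16:5 - 12-bit ITR interval in 2us units, see
 *		    gve_setup_itr_interval_dqo() below
 */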

#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2)
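
/* GVE_MAX_ITR_INTERVAL_DQO above follows from the interval field being 12
 * bits wide in 2us units: the largest programmable interval in microseconds
 * is twice the field's maximum raw value.
 */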

/* Timeout in seconds to wait for a reinjection completion after receiving
 * its corresponding miss completion.
 */
#define GVE_REINJECT_COMPL_TIMEOUT 1

/* Timeout in seconds to deallocate the completion tag for a packet that was
 * prematurely freed for not receiving a valid completion. This should be large
 * enough to rule out the possibility of receiving the corresponding valid
 * completion after this interval.
 */
#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
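
/* Illustrative sketch only; the helper and field names here are hypothetical
 * and not part of this header. A completion path could arm and check such a
 * deadline by converting seconds to jiffies:
 *
 *	pkt->deadline = jiffies + GVE_REINJECT_COMPL_TIMEOUT * HZ;
 *	...
 *	if (time_after(jiffies, pkt->deadline))
 *		handle_missed_completion(pkt);
 */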

netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
					 struct net_device *dev,
					 netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
bool gve_xdp_poll_dqo(struct gve_notify_block *block);
bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_dqo(struct gve_priv *priv,
			   struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx);
void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);

static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
			const struct gve_queue_resources *q_resources, u32 val)
{
	u64 index;

	index = be32_to_cpu(q_resources->db_index);
	iowrite32(val, &priv->db_bar2[index]);
}
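
/* Usage sketch: after writing TX descriptors, the hot path would ring the
 * doorbell with the new tail index, e.g.
 *
 *	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
 *
 * (The tx field names above are assumed from the driver's TX ring and may
 * differ.)
 */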

/* Builds the register value to write to the DQO IRQ doorbell to enable the
 * interrupt with the specified ITR interval.
 */
static inline u32 gve_setup_itr_interval_dqo(u32 interval_us)
{
	u32 result = GVE_ITR_ENABLE_BIT_DQO;

	/* Interval has 2us granularity. */
	interval_us >>= 1;

	interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
	result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);

	return result;
}
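
/* Worked example: interval_us = 20 becomes 10 after conversion to 2us units,
 * so the returned doorbell value is GVE_ITR_ENABLE_BIT_DQO | (10 << 5), i.e.
 * 0x141: interrupt enabled with a 20us throttling interval.
 */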

static inline void
gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
			   const struct gve_notify_block *block, u32 val)
{
	u32 index = be32_to_cpu(*block->irq_db_index);

	iowrite32(val, &priv->db_bar2[index]);
}

/* Sets the interrupt throttling interval and enables the interrupt
 * by writing to the IRQ doorbell.
 */
static inline void
gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
			       struct gve_notify_block *block,
			       u32 usecs)
{
	gve_write_irq_doorbell_dqo(priv, block,
				   gve_setup_itr_interval_dqo(usecs));
}
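
/* Usage sketch: a caller could apply the default ratelimits defined above
 * when enabling a queue's interrupt, e.g.
 *
 *	gve_set_itr_coalesce_usecs_dqo(priv, block,
 *				       GVE_TX_IRQ_RATELIMIT_US_DQO);
 *
 * Note that values above GVE_MAX_ITR_INTERVAL_DQO are silently truncated by
 * the mask in gve_setup_itr_interval_dqo(), so callers are expected to
 * validate against GVE_MAX_ITR_INTERVAL_DQO first.
 */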

int gve_napi_poll_dqo(struct napi_struct *napi, int budget);

#endif /* _GVE_DQO_H_ */