// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_xdp.h"

static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &tx_ring->tx_buffer_info[req_id];
	if (likely(tx_info->xdpf))
		return 0;

	return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
}
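
/* Map an xdp_frame for transmission on an XDP TX ring. When the queue uses
 * the device placement policy (LLQ), up to tx_max_header_size bytes are
 * pushed inline as the header and only the remainder, if any, is DMA mapped.
 */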
static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	int push_len = 0;
	dma_addr_t dma;
	void *data;
	u32 size;

	tx_info->xdpf = xdpf;
	data = tx_info->xdpf->data;
	size = tx_info->xdpf->len;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Designate part of the packet for LLQ */
		push_len = min_t(u32, size, tx_ring->tx_max_header_size);

		ena_tx_ctx->push_header = data;

		size -= push_len;
		data += push_len;
	}

	ena_tx_ctx->header_len = push_len;

	if (size > 0) {
		dma = dma_map_single(tx_ring->dev,
				     data,
				     size,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 0;

		ena_buf = tx_info->bufs;
		ena_buf->paddr = dma;
		ena_buf->len = size;

		ena_tx_ctx->ena_bufs = ena_buf;
		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
			  &tx_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	return -EINVAL;
}
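
/* Transmit a single xdp_frame on an XDP TX ring. Callers serialize access to
 * the ring (ena_xdp_xmit() below holds xdp_tx_lock) and may defer the
 * doorbell by clearing XDP_XMIT_FLUSH, ringing it once for a whole batch.
 */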
int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
		       struct ena_adapter *adapter,
		       struct xdp_frame *xdpf,
		       int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	int rc;

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
	if (unlikely(rc))
		goto err;

	ena_tx_ctx.req_id = req_id;

	rc = ena_xmit_common(adapter,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;

	/* trigger the dma engine. ena_ring_tx_doorbell()
	 * calls a memory barrier inside it.
	 */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(tx_ring);

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
err:
	tx_info->xdpf = NULL;

	return rc;
}
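
/* Batch transmit entry point used for XDP_REDIRECT (ndo_xdp_xmit). Frames
 * are sent on one of the dedicated XDP TX rings, chosen by the current CPU.
 */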
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_ring *tx_ring;
	int qid, i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	tx_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&tx_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
			break;
		nxmit++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(tx_ring);

	spin_unlock(&tx_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return nxmit;
}

static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}
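
/* Allocate TX resources for the XDP rings and create the matching device
 * queues; on failure, release whatever was set up before returning.
 */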
int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	u32 xdp_first_ring = adapter->xdp_first_ring;
	u32 xdp_num_queues = adapter->xdp_num_queues;
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d",
		  rx_ring->qid);
	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev,
		  "Unregistering RX info for queue %d",
		  rx_ring->qid);
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count)
{
	struct bpf_prog *old_bpf_prog;
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);

		if (!old_bpf_prog && prog) {
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else if (old_bpf_prog && !prog) {
			rx_ring->rx_headroom = NET_SKB_PAD;
		}
	}
}
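
/* Swap the adapter-wide XDP program, propagate it to every RX ring and drop
 * the reference to the previously installed program, if any.
 */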
static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);

	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}

	return 0;
}
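
/* Handle XDP_SETUP_PROG: attach or detach an XDP program. Installing a
 * program needs dedicated XDP TX queues, so a running interface is restarted
 * (ena_down()/ena_up()) when a program is first attached or finally removed.
 */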
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n");

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
			xdp_features_set_redirect_target(netdev, false);
		} else if (old_bpf_prog) {
			xdp_features_clear_redirect_target(netdev);
			netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n");

			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}

	return 0;
}
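
/* Reap TX completions on an XDP ring: for each completed descriptor, unmap
 * the buffer, hand the xdp_frame back to its memory allocator and recycle
 * the request id, processing at most @budget frames per call.
 */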
static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!tx_ring))
		return 0;
	next_to_clean = tx_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(tx_ring, req_id, NULL, true);
			break;
		}

		/* validate that the request id points to a valid xdp_frame */
		rc = validate_xdp_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];

		tx_info->last_jiffies = 0;

		xdpf = tx_info->xdpf;
		tx_info->xdpf = NULL;
		ena_unmap_tx_buff(tx_ring, tx_info);
		xdp_return_frame(xdpf);

		tx_pkts++;
		total_done += tx_info->tx_descs;
		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	return tx_pkts;
}

/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from the Rx/Tx queues.
 */
int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring;
	u32 work_done;
	int ret;

	tx_ring = ena_napi->tx_ring;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	work_done = ena_clean_xdp_irq(tx_ring, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (budget > work_done) {
		ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
				  &tx_ring->syncp);
		if (napi_complete_done(napi, work_done))
			ena_unmask_interrupt(tx_ring, NULL);

		ena_update_ring_numa_node(tx_ring, NULL);
		ret = work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}