/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
#define _NET_PAGE_POOL_MEMORY_PROVIDER_H

#include <net/netmem.h>
#include <net/page_pool/types.h>

struct netdev_rx_queue;
struct netlink_ext_ack;
struct sk_buff;
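
/**
 * struct memory_provider_ops - hooks a page pool memory provider implements
 * @alloc_netmems:	hand out a netmem to the pool; only called when the
 *			pool's allocation cache is empty
 * @release_netmem:	take back a netmem the pool is releasing
 * @init:	set up provider state when a page pool starts using it
 * @destroy:	tear down provider state when the page pool goes away
 * @nl_fill:	describe the provider in a netlink response message
 * @uninstall:	detach the provider from an rx queue being unbound
 *
 * A memory provider lets a page pool serve memory from an external source,
 * such as a dma-buf or an io_uring zero-copy rx area, instead of the page
 * allocator. Member descriptions above are inferred from the in-tree users.
 */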
struct memory_provider_ops {
	netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
	bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
	int (*init)(struct page_pool *pool);
	void (*destroy)(struct page_pool *pool);
	int (*nl_fill)(void *mp_priv, struct sk_buff *rsp,
		       struct netdev_rx_queue *rxq);
	void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
};
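
/*
 * Helpers for providers backed by net_iov areas: record the DMA address a
 * net_iov maps to, and associate it with (or detach it from) a page pool.
 */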
bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
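
/*
 * Bind a memory provider to an rx queue of a device and restart the queue
 * so it refills from the provider. The double-underscore variant reports
 * errors via extack and, presumably, expects the caller to already hold the
 * relevant netdev lock (check the call sites).
 */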
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		    struct pp_memory_provider_params *p);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
		      const struct pp_memory_provider_params *p,
		      struct netlink_ext_ack *extack);
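
/*
 * Counterparts to the open calls above: unbind the provider and restart the
 * queue on regular page allocator memory.
 */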
void net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
		      struct pp_memory_provider_params *old_p);
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
			const struct pp_memory_provider_params *old_p);

/**
 * net_mp_netmem_place_in_cache() - give a netmem to a page pool
 * @pool:	the page pool to place the netmem into
 * @netmem:	netmem to give
 *
 * Push an accounted netmem into the page pool's allocation cache. The caller
 * must ensure that there is space in the cache. It should only be called off
 * the mp_ops->alloc_netmems() path.
 */
static inline void net_mp_netmem_place_in_cache(struct page_pool *pool,
						netmem_ref netmem)
{
	pool->alloc.cache[pool->alloc.count++] = netmem;
}
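
/*
 * A minimal sketch of an alloc_netmems() implementation using the helper
 * above; illustrative only. my_take_niov() stands in for however a real
 * provider obtains a free net_iov from its private state (pool->mp_priv):
 *
 *	static netmem_ref my_alloc_netmems(struct page_pool *pool, gfp_t gfp)
 *	{
 *		struct net_iov *niov = my_take_niov(pool->mp_priv);
 *
 *		if (!niov)
 *			return 0;
 *		net_mp_niov_set_page_pool(pool, niov);
 *		net_mp_netmem_place_in_cache(pool, net_iov_to_netmem(niov));
 *		return pool->alloc.cache[--pool->alloc.count];
 *	}
 *
 * The pool invokes alloc_netmems() only when its cache is empty, so there
 * is guaranteed to be room for the netmem placed here.
 */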
#endif