Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-21 06:50:25 +00:00)
net: hns3: refactor hns3_nic_reuse_page()
Split the rx copybreak handling out of hns3_nic_reuse_page() into a separate function to improve code simplicity.

Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed0e658c51
commit e74a726da2
1 changed file with 35 additions and 20 deletions
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -3546,6 +3546,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
 	return page_count(cb->priv) == cb->pagecnt_bias;
 }
 
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+				    struct hns3_enet_ring *ring,
+				    int pull_len,
+				    struct hns3_desc_cb *desc_cb)
+{
+	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+	u32 frag_offset = desc_cb->page_offset + pull_len;
+	int size = le16_to_cpu(desc->rx.size);
+	u32 frag_size = size - pull_len;
+	void *frag = napi_alloc_frag(frag_size);
+
+	if (unlikely(!frag)) {
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.frag_alloc_err++;
+		u64_stats_update_end(&ring->syncp);
+
+		hns3_rl_err(ring_to_netdev(ring),
+			    "failed to allocate rx frag\n");
+		return -ENOMEM;
+	}
+
+	desc_cb->reuse_flag = 1;
+	memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+	skb_add_rx_frag(skb, i, virt_to_page(frag),
+			offset_in_page(frag), frag_size, frag_size);
+
+	u64_stats_update_begin(&ring->syncp);
+	ring->stats.frag_alloc++;
+	u64_stats_update_end(&ring->syncp);
+	return 0;
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -3555,6 +3587,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	int size = le16_to_cpu(desc->rx.size);
 	u32 truesize = hns3_buf_size(ring);
 	u32 frag_size = size - pull_len;
+	int ret = 0;
 	bool reused;
 
 	if (ring->page_pool) {
@@ -3589,27 +3622,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 		desc_cb->page_offset = 0;
 		desc_cb->reuse_flag = 1;
 	} else if (frag_size <= ring->rx_copybreak) {
-		void *frag = napi_alloc_frag(frag_size);
-
-		if (unlikely(!frag)) {
-			u64_stats_update_begin(&ring->syncp);
-			ring->stats.frag_alloc_err++;
-			u64_stats_update_end(&ring->syncp);
-
-			hns3_rl_err(ring_to_netdev(ring),
-				    "failed to allocate rx frag\n");
+		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+		if (ret)
 			goto out;
-		}
-
-		desc_cb->reuse_flag = 1;
-		memcpy(frag, desc_cb->buf + frag_offset, frag_size);
-		skb_add_rx_frag(skb, i, virt_to_page(frag),
-				offset_in_page(frag), frag_size, frag_size);
-
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.frag_alloc++;
-		u64_stats_update_end(&ring->syncp);
-		return;
 	}
 
 out:
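For orientation, here is a minimal userspace sketch of the copybreak pattern the new helper implements: a frame no larger than a threshold is copied into a small, freshly allocated fragment so the original receive page can be recycled immediately. Everything in it (toy_rx_copybreak, rx_buf, the 4096-byte buffer, the 256-byte threshold) is a hypothetical stand-in, not driver code; the real path uses napi_alloc_frag(), skb_add_rx_frag() and ring->rx_copybreak as shown in the diff above.

/*
 * Userspace sketch of the rx copybreak idea (hypothetical names, not the
 * kernel API): small payloads are copied into a freshly allocated buffer so
 * the original DMA page can be reused, mirroring what
 * hns3_handle_rx_copybreak() does with napi_alloc_frag() and memcpy().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rx_buf {
	unsigned char data[4096];	/* stand-in for the DMA page */
	size_t len;			/* bytes received into it */
	int reuse_flag;			/* set when the page can be recycled */
};

/* Copy a small frame out of @buf; on success the buffer is marked reusable. */
static int toy_rx_copybreak(struct rx_buf *buf, unsigned char **frag_out)
{
	unsigned char *frag = malloc(buf->len);	/* plays the napi_alloc_frag() role */

	if (!frag)
		return -ENOMEM;			/* caller bails out, as in the driver */

	memcpy(frag, buf->data, buf->len);
	buf->reuse_flag = 1;			/* original page goes back to the ring */
	*frag_out = frag;
	return 0;
}

int main(void)
{
	struct rx_buf buf = { .len = 128 };
	size_t copybreak = 256;			/* threshold, analogous to ring->rx_copybreak */
	unsigned char *frag = NULL;

	memset(buf.data, 0xab, buf.len);

	if (buf.len <= copybreak && toy_rx_copybreak(&buf, &frag) == 0)
		printf("copied %zu bytes, page reusable: %d\n", buf.len, buf.reuse_flag);
	else
		printf("frame too large (or alloc failed), consume the page instead\n");

	free(frag);
	return 0;
}

Having the helper report 0 or -ENOMEM, mirrored by the toy function above, is what lets hns3_nic_reuse_page() collapse its inline failure handling into the single "if (ret) goto out;" seen in the last hunk.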