mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-18 12:30:31 +00:00
net: rswitch: Add jumbo frames handling for TX
If the driver would like to transmit a jumbo frame of 2 KiB or more, the frame should be split across multiple descriptors. To support this in the near future, add handling of the specific descriptor types F{START,MID,END}. However, such jumbo frames cannot occur yet because the maximum MTU size is still the default for now.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
933416cc59
commit
d2c96b9d5f
1 changed file with 46 additions and 10 deletions
|
@ -1632,15 +1632,44 @@ static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
|
||||||
|
{
|
||||||
|
if (nr_desc == 1)
|
||||||
|
return DT_FSINGLE | DIE;
|
||||||
|
if (index == 0)
|
||||||
|
return DT_FSTART;
|
||||||
|
if (nr_desc - 1 == index)
|
||||||
|
return DT_FEND | DIE;
|
||||||
|
return DT_FMID;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Return the data length carried by one TX descriptor of a (possibly split)
 * frame of @orig_len bytes, based on its descriptor type @die_dt.
 *
 * FSTART/FMID descriptors always carry a full RSWITCH_DESC_BUF_SIZE chunk;
 * the frame-terminating descriptor (FSINGLE/FEND) carries the remainder, or
 * a full buffer when orig_len is an exact multiple of the buffer size.
 * Unknown types yield 0.
 */
static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
{
	u8 dt = die_dt & DT_MASK;

	if (dt == DT_FSTART || dt == DT_FMID)
		return RSWITCH_DESC_BUF_SIZE;

	if (dt == DT_FSINGLE || dt == DT_FEND)
		return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;

	return 0;
}
|
||||||
|
|
||||||
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||||
{
|
{
|
||||||
struct rswitch_device *rdev = netdev_priv(ndev);
|
struct rswitch_device *rdev = netdev_priv(ndev);
|
||||||
struct rswitch_gwca_queue *gq = rdev->tx_queue;
|
struct rswitch_gwca_queue *gq = rdev->tx_queue;
|
||||||
|
dma_addr_t dma_addr, dma_addr_orig;
|
||||||
netdev_tx_t ret = NETDEV_TX_OK;
|
netdev_tx_t ret = NETDEV_TX_OK;
|
||||||
struct rswitch_ext_desc *desc;
|
struct rswitch_ext_desc *desc;
|
||||||
dma_addr_t dma_addr;
|
unsigned int i, nr_desc;
|
||||||
|
u8 die_dt;
|
||||||
|
u16 len;
|
||||||
|
|
||||||
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
|
nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
|
||||||
|
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
|
||||||
netif_stop_subqueue(ndev, 0);
|
netif_stop_subqueue(ndev, 0);
|
||||||
return NETDEV_TX_BUSY;
|
return NETDEV_TX_BUSY;
|
||||||
}
|
}
|
||||||
|
@ -1648,25 +1677,32 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
|
||||||
if (skb_put_padto(skb, ETH_ZLEN))
|
if (skb_put_padto(skb, ETH_ZLEN))
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
|
dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(ndev->dev.parent, dma_addr))
|
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
|
||||||
goto err_kfree;
|
goto err_kfree;
|
||||||
|
|
||||||
gq->skbs[gq->cur] = skb;
|
gq->skbs[gq->cur] = skb;
|
||||||
gq->unmap_addrs[gq->cur] = dma_addr;
|
gq->unmap_addrs[gq->cur] = dma_addr_orig;
|
||||||
desc = &gq->tx_ring[gq->cur];
|
|
||||||
if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, skb->len, DT_FSINGLE | DIE))
|
/* DT_FSTART should be set at last. So, this is reverse order. */
|
||||||
goto err_unmap;
|
for (i = nr_desc; i-- > 0; ) {
|
||||||
|
desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
|
||||||
|
die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
|
||||||
|
dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
|
||||||
|
len = rswitch_ext_desc_get_len(die_dt, skb->len);
|
||||||
|
if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
|
||||||
|
goto err_unmap;
|
||||||
|
}
|
||||||
|
|
||||||
wmb(); /* gq->cur must be incremented after die_dt was set */
|
wmb(); /* gq->cur must be incremented after die_dt was set */
|
||||||
|
|
||||||
gq->cur = rswitch_next_queue_index(gq, true, 1);
|
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
|
||||||
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
|
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
err_unmap:
|
err_unmap:
|
||||||
dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
|
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
|
||||||
|
|
||||||
err_kfree:
|
err_kfree:
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
|
|
Loading…
Add table
Reference in a new issue