Merge tag 'net-6.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from Bluetooth and wireless.

  A few more fixes for the locking changes trickling in. Nothing too
  alarming, I suspect those will continue for another release. Other
  than that things are slowing down nicely.

  Current release - fix to a fix:

   - Bluetooth: hci_event: use key encryption size when its known

   - tools: ynl-gen: allow multi-attr without nested-attributes again

  Current release - regressions:

   - locking fixes:
      - lock lower level devices when updating features
      - eth: bnxt_en: bring back rtnl_lock() in the bnxt_open() path

   - devmem: fix panic when Netlink socket closes after module unload

  Current release - new code bugs:

   - eth: txgbe: fixes for FW communication on new AML devices

  Previous releases - always broken:

   - sched: flush gso_skb list too during ->change(), avoid potential
     null-deref on reconfig

   - wifi: mt76: disable NAPI on driver removal

   - hv_netvsc: fix error 'nvsp_rndis_pkt_complete error status: 2'"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-6.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (44 commits)
  net: devmem: fix kernel panic when netlink socket close after module unload
  tsnep: fix timestamping with a stacked DSA driver
  net/tls: fix kernel panic when alloc_page failed
  bnxt_en: bring back rtnl_lock() in the bnxt_open() path
  mlxsw: spectrum_router: Fix use-after-free when deleting GRE net devices
  wifi: mac80211: Set n_channels after allocating struct cfg80211_scan_request
  octeontx2-pf: Do not reallocate all ntuple filters
  wifi: mt76: mt7925: fix missing hdr_trans_tlv command for broadcast wtbl
  wifi: mt76: disable napi on driver removal
  Drivers: hv: vmbus: Remove vmbus_sendpacket_pagebuffer()
  hv_netvsc: Remove rmsg_pgcnt
  hv_netvsc: Preserve contiguous PFN grouping in the page buffer array
  hv_netvsc: Use vmbus_sendpacket_mpb_desc() to send VMBus messages
  Drivers: hv: Allow vmbus_sendpacket_mpb_desc() to create multiple ranges
  octeontx2-af: Fix CGX Receive counters
  net: ethernet: mtk_eth_soc: fix typo for declaration MT7988 ESW capability
  net: libwx: Fix FW mailbox unknown command
  net: libwx: Fix FW mailbox reply timeout
  net: txgbe: Fix to calculate EEPROM checksum for AML devices
  octeontx2-pf: macsec: Fix incorrect max transmit size in TX secy
  ...
commit ef935650e0
64 changed files with 698 additions and 361 deletions
@@ -2017,7 +2017,8 @@ attribute-sets:
       attributes:
         -
           name: act
-          type: nest
+          type: indexed-array
+          sub-type: nest
           nested-attributes: tc-act-attrs
         -
           name: police
@@ -2250,7 +2251,8 @@ attribute-sets:
       attributes:
         -
           name: act
-          type: nest
+          type: indexed-array
+          sub-type: nest
           nested-attributes: tc-act-attrs
         -
           name: police
@@ -2745,7 +2747,7 @@ attribute-sets:
           type: u16
           byte-order: big-endian
         -
-          name: key-l2-tpv3-sid
+          name: key-l2tpv3-sid
           type: u32
           byte-order: big-endian
         -
@@ -3504,7 +3506,7 @@ attribute-sets:
           name: rate64
           type: u64
         -
-          name: prate4
+          name: prate64
           type: u64
         -
           name: burst
@@ -811,11 +811,9 @@ Documentation/devicetree/bindings/ptp/timestamper.txt for more details.
 3.2.4 Other caveats for MAC drivers
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Stacked PHCs, especially DSA (but not only) - since that doesn't require any
-modification to MAC drivers, so it is more difficult to ensure correctness of
-all possible code paths - is that they uncover bugs which were impossible to
-trigger before the existence of stacked PTP clocks. One example has to do with
-this line of code, already presented earlier::
+The use of stacked PHCs may uncover MAC driver bugs which were impossible to
+trigger without them. One example has to do with this line of code, already
+presented earlier::
 
   skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
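A hedged illustration of the bug class (the driver fields priv->hwtstamp_tx_on
and MY_DESC_TSTAMP are hypothetical, not from any real driver): a MAC driver
must not test SKBTX_IN_PROGRESS at completion time to decide whether it owns
the timestamp, because a stacked PHC such as a DSA switch may have set that
flag for its own packet. Recording the decision in driver-private state when
the descriptor is queued avoids the misattribution::

  /* queue time: the MAC decides it will timestamp this skb itself */
  if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
      priv->hwtstamp_tx_on) {
          skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
          desc->flags |= MY_DESC_TSTAMP;  /* driver-private marker */
  }

  /* completion time: trust the private marker, not SKBTX_IN_PROGRESS */
  if (desc->flags & MY_DESC_TSTAMP)
          skb_tstamp_tx(skb, &hwtstamps);

This is the pattern the tsnep fix in this pull moves to.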
@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
 EXPORT_SYMBOL(vmbus_sendpacket);
 
 /*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type. This interface allows you
- * to control notifying the host. This will be useful for sending
- * batched data. Also the sender can control the send flags
- * explicitly.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
-				struct hv_page_buffer pagebuffers[],
-				u32 pagecount, void *buffer, u32 bufferlen,
-				u64 requestid)
-{
-	int i;
-	struct vmbus_channel_packet_page_buffer desc;
-	u32 descsize;
-	u32 packetlen;
-	u32 packetlen_aligned;
-	struct kvec bufferlist[3];
-	u64 aligned_data = 0;
-
-	if (pagecount > MAX_PAGE_BUFFER_COUNT)
-		return -EINVAL;
-
-	/*
-	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
-	 * largest size we support
-	 */
-	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
-			((MAX_PAGE_BUFFER_COUNT - pagecount) *
-			sizeof(struct hv_page_buffer));
-	packetlen = descsize + bufferlen;
-	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
-	/* Setup the descriptor */
-	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
-	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
-	desc.length8 = (u16)(packetlen_aligned >> 3);
-	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
-	desc.reserved = 0;
-	desc.rangecount = pagecount;
-
-	for (i = 0; i < pagecount; i++) {
-		desc.range[i].len = pagebuffers[i].len;
-		desc.range[i].offset = pagebuffers[i].offset;
-		desc.range[i].pfn = pagebuffers[i].pfn;
-	}
-
-	bufferlist[0].iov_base = &desc;
-	bufferlist[0].iov_len = descsize;
-	bufferlist[1].iov_base = buffer;
-	bufferlist[1].iov_len = bufferlen;
-	bufferlist[2].iov_base = &aligned_data;
-	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-
-	return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
-
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
  * using a GPADL Direct packet type.
- * The buffer includes the vmbus descriptor.
+ * The desc argument must include space for the VMBus descriptor. The
+ * rangecount field must already be set.
  */
 int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 			      struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	desc->length8 = (u16)(packetlen_aligned >> 3);
 	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
 	desc->reserved = 0;
-	desc->rangecount = 1;
 
 	bufferlist[0].iov_base = desc;
 	bufferlist[0].iov_len = desc_size;
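For reference, a sketch of how a caller is now expected to use
vmbus_sendpacket_mpb_desc() after this change (nr_pfns, pfn, len, msg and
req_id are illustrative assumptions, not taken from this patch): the caller
sizes the descriptor for its trailing pfn_array, fills the range(s), and sets
rangecount itself, since the helper no longer overwrites it with 1.

	struct vmbus_packet_mpb_array *desc;
	u32 desc_size;
	int ret;

	desc_size = struct_size(desc, range.pfn_array, nr_pfns);
	desc = kzalloc(desc_size, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->range.offset = 0;
	desc->range.len = len;			/* contiguous guest-physical range */
	desc->range.pfn_array[0] = pfn;		/* fill nr_pfns sequential PFNs */
	desc->rangecount = 1;			/* caller-owned after this change */

	ret = vmbus_sendpacket_mpb_desc(channel, desc, desc_size,
					&msg, sizeof(msg), req_id);
	kfree(desc);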
@@ -326,6 +326,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
 	}
 }
 
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+	u64 eap_conf;
+
+	if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+		return;
+
+	b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+	if (is63xx(dev)) {
+		eap_conf &= ~EAP_MODE_MASK_63XX;
+		eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+	} else {
+		eap_conf &= ~EAP_MODE_MASK;
+		eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+	}
+
+	b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
 static void b53_set_forwarding(struct b53_device *dev, int enable)
 {
 	u8 mgmt;
@@ -586,6 +606,13 @@ int b53_setup_port(struct dsa_switch *ds, int port)
 	b53_port_set_mcast_flood(dev, port, true);
 	b53_port_set_learning(dev, port, false);
 
+	/* Force all traffic to go to the CPU port to prevent the ASIC from
+	 * trying to forward to bridged ports on matching FDB entries, then
+	 * dropping frames because it isn't allowed to forward there.
+	 */
+	if (dsa_is_user_port(ds, port))
+		b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
 	return 0;
 }
 EXPORT_SYMBOL(b53_setup_port);
@@ -2042,6 +2069,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
 		pvlan |= BIT(i);
 	}
 
+	/* Disable redirection of unknown SA to the CPU port */
+	b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
 	/* Configure the local port VLAN control membership to include
 	 * remote ports and update the local port bitmask
 	 */
@@ -2077,6 +2107,9 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
 		pvlan &= ~BIT(i);
 	}
 
+	/* Enable redirection of unknown SA to the CPU port */
+	b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
 	dev->ports[port].vlan_ctl_mask = pvlan;
@@ -50,6 +50,9 @@
 /* Jumbo Frame Registers */
 #define B53_JUMBO_PAGE			0x40
 
+/* EAP Registers */
+#define B53_EAP_PAGE			0x42
+
 /* EEE Control Registers Page */
 #define B53_EEE_PAGE			0x92
 
@@ -480,6 +483,17 @@
 #define   JMS_MIN_SIZE			1518
 #define   JMS_MAX_SIZE			9724
 
+/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i)		(0x20 + 8 * (i))
+#define  EAP_MODE_SHIFT			51
+#define  EAP_MODE_SHIFT_63XX		50
+#define  EAP_MODE_MASK			(0x3ull << EAP_MODE_SHIFT)
+#define  EAP_MODE_MASK_63XX		(0x3ull << EAP_MODE_SHIFT_63XX)
+#define  EAP_MODE_BASIC			0
+#define  EAP_MODE_SIMPLIFIED		3
+
 /*************************************************************************
  * EEE Configuration Page Registers
  *************************************************************************/
@@ -265,16 +265,70 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
 				      unsigned int mode,
 				      phy_interface_t interface);
 
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (Always success)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+					 u32 timer, bool tx_clock_stop)
+{
+	return 0;
+}
+
 static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
 	.mac_config	= ksz88x3_phylink_mac_config,
 	.mac_link_down	= ksz_phylink_mac_link_down,
 	.mac_link_up	= ksz8_phylink_mac_link_up,
+	.mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+	.mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
 };
 
 static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
 	.mac_config	= ksz_phylink_mac_config,
 	.mac_link_down	= ksz_phylink_mac_link_down,
 	.mac_link_up	= ksz8_phylink_mac_link_up,
+	.mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+	.mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
 };
 
 static const struct ksz_dev_ops ksz88xx_dev_ops = {
@@ -358,6 +412,8 @@ static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
 	.mac_config	= ksz_phylink_mac_config,
 	.mac_link_down	= ksz_phylink_mac_link_down,
 	.mac_link_up	= ksz9477_phylink_mac_link_up,
+	.mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+	.mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
 };
 
 static const struct ksz_dev_ops ksz9477_dev_ops = {
@@ -401,6 +457,8 @@ static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
 	.mac_config	= ksz_phylink_mac_config,
 	.mac_link_down	= ksz_phylink_mac_link_down,
 	.mac_link_up	= ksz9477_phylink_mac_link_up,
+	.mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+	.mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
 };
 
 static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -2016,6 +2074,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
 
 	if (dev->dev_ops->get_caps)
 		dev->dev_ops->get_caps(dev, port, config);
+
+	if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+		memcpy(config->lpi_interfaces, config->supported_interfaces,
+		       sizeof(config->lpi_interfaces));
+
+		config->lpi_capabilities = MAC_100FD;
+		if (dev->info->gbit_capable[port])
+			config->lpi_capabilities |= MAC_1000FD;
+
+		/* EEE is fully operational */
+		config->eee_enabled_default = true;
+	}
 }
 
 void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -3008,31 +3078,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
 		if (!port)
 			return MICREL_KSZ8_P1_ERRATA;
 		break;
-	case KSZ8567_CHIP_ID:
-		/* KSZ8567R Errata DS80000752C Module 4 */
-	case KSZ8765_CHIP_ID:
-	case KSZ8794_CHIP_ID:
-	case KSZ8795_CHIP_ID:
-		/* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
-	case KSZ9477_CHIP_ID:
-		/* KSZ9477S Errata DS80000754A Module 4 */
-	case KSZ9567_CHIP_ID:
-		/* KSZ9567S Errata DS80000756A Module 4 */
-	case KSZ9896_CHIP_ID:
-		/* KSZ9896C Errata DS80000757A Module 3 */
-	case KSZ9897_CHIP_ID:
-	case LAN9646_CHIP_ID:
-		/* KSZ9897R Errata DS80000758C Module 4 */
-		/* Energy Efficient Ethernet (EEE) feature select must be manually disabled
-		 *   The EEE feature is enabled by default, but it is not fully
-		 *   operational. It must be manually disabled through register
-		 *   controls. If not disabled, the PHY ports can auto-negotiate
-		 *   to enable EEE, and this feature can cause link drops when
-		 *   linked to another device supporting EEE.
-		 *
-		 * The same item appears in the errata for all switches above.
-		 */
-		return MICREL_NO_EEE;
 	}
 
 	return 0;
@@ -3466,6 +3511,20 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
 	return -EOPNOTSUPP;
 }
 
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ *                   port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
 static bool ksz_support_eee(struct dsa_switch *ds, int port)
 {
 	struct ksz_device *dev = ds->priv;
@@ -3475,15 +3534,35 @@ static bool ksz_support_eee(struct dsa_switch *ds, int port)
 
 	switch (dev->chip_id) {
 	case KSZ8563_CHIP_ID:
-	case KSZ8567_CHIP_ID:
-	case KSZ9477_CHIP_ID:
 	case KSZ9563_CHIP_ID:
-	case KSZ9567_CHIP_ID:
 	case KSZ9893_CHIP_ID:
 		return true;
+	case KSZ8567_CHIP_ID:
+		/* KSZ8567R Errata DS80000752C Module 4 */
+	case KSZ8765_CHIP_ID:
+	case KSZ8794_CHIP_ID:
+	case KSZ8795_CHIP_ID:
+		/* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
+	case KSZ9477_CHIP_ID:
+		/* KSZ9477S Errata DS80000754A Module 4 */
+	case KSZ9567_CHIP_ID:
+		/* KSZ9567S Errata DS80000756A Module 4 */
+	case KSZ9896_CHIP_ID:
+		/* KSZ9896C Errata DS80000757A Module 3 */
+	case KSZ9897_CHIP_ID:
+	case LAN9646_CHIP_ID:
+		/* KSZ9897R Errata DS80000758C Module 4 */
+		/* Energy Efficient Ethernet (EEE) feature select must be
+		 * manually disabled
+		 *   The EEE feature is enabled by default, but it is not fully
+		 *   operational. It must be manually disabled through register
+		 *   controls. If not disabled, the PHY ports can auto-negotiate
+		 *   to enable EEE, and this feature can cause link drops when
+		 *   linked to another device supporting EEE.
+		 *
+		 * The same item appears in the errata for all switches above.
+		 */
+		break;
 	}
 
 	return false;
@@ -2081,6 +2081,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
 	switch (state) {
 	case BR_STATE_DISABLED:
 	case BR_STATE_BLOCKING:
+	case BR_STATE_LISTENING:
 		/* From UM10944 description of DRPDTAG (why put this there?):
 		 * "Management traffic flows to the port regardless of the state
 		 *  of the INGRESS flag". So BPDUs are still be allowed to pass.
@@ -2090,11 +2091,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
 		mac[port].egress    = false;
 		mac[port].dyn_learn = false;
 		break;
-	case BR_STATE_LISTENING:
-		mac[port].ingress   = true;
-		mac[port].egress    = false;
-		mac[port].dyn_learn = false;
-		break;
 	case BR_STATE_LEARNING:
 		mac[port].ingress   = true;
 		mac[port].egress    = false;
@@ -14013,13 +14013,28 @@ static void bnxt_unlock_sp(struct bnxt *bp)
 	netdev_unlock(bp->dev);
 }
 
+/* Same as bnxt_lock_sp() with additional rtnl_lock */
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+{
+	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	rtnl_lock();
+	netdev_lock(bp->dev);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
+	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	netdev_unlock(bp->dev);
+	rtnl_unlock();
+}
+
 /* Only called from bnxt_sp_task() */
 static void bnxt_reset(struct bnxt *bp, bool silent)
 {
-	bnxt_lock_sp(bp);
+	bnxt_rtnl_lock_sp(bp);
 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
 		bnxt_reset_task(bp, silent);
-	bnxt_unlock_sp(bp);
+	bnxt_rtnl_unlock_sp(bp);
 }
 
 /* Only called from bnxt_sp_task() */
@@ -14027,9 +14042,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 {
 	int i;
 
-	bnxt_lock_sp(bp);
+	bnxt_rtnl_lock_sp(bp);
 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
-		bnxt_unlock_sp(bp);
+		bnxt_rtnl_unlock_sp(bp);
 		return;
 	}
 	/* Disable and flush TPA before resetting the RX ring */
@@ -14068,7 +14083,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 	}
 	if (bp->flags & BNXT_FLAG_TPA)
 		bnxt_set_tpa(bp, true);
-	bnxt_unlock_sp(bp);
+	bnxt_rtnl_unlock_sp(bp);
 }
 
 static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -14960,15 +14975,17 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
 		fallthrough;
 	case BNXT_FW_RESET_STATE_OPENING:
-		while (!netdev_trylock(bp->dev)) {
+		while (!rtnl_trylock()) {
 			bnxt_queue_fw_reset_work(bp, HZ / 10);
 			return;
 		}
+		netdev_lock(bp->dev);
 		rc = bnxt_open(bp->dev);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
 			bnxt_fw_reset_abort(bp, rc);
 			netdev_unlock(bp->dev);
+			rtnl_unlock();
 			goto ulp_start;
 		}
 
@@ -14988,6 +15005,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 			bnxt_dl_health_fw_status_update(bp, true);
 		}
 		netdev_unlock(bp->dev);
+		rtnl_unlock();
 		bnxt_ulp_start(bp, 0);
 		bnxt_reenable_sriov(bp);
 		netdev_lock(bp->dev);
@@ -15936,7 +15954,7 @@ err_reset:
 		   rc);
 	napi_enable_locked(&bnapi->napi);
 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
-	bnxt_reset_task(bp, true);
+	netif_close(dev);
 	return rc;
 }
 
@@ -16752,6 +16770,7 @@ static int bnxt_resume(struct device *device)
 	struct bnxt *bp = netdev_priv(dev);
 	int rc = 0;
 
+	rtnl_lock();
 	netdev_lock(dev);
 	rc = pci_enable_device(bp->pdev);
 	if (rc) {
@@ -16796,6 +16815,7 @@ static int bnxt_resume(struct device *device)
 
 resume_exit:
 	netdev_unlock(bp->dev);
+	rtnl_unlock();
 	bnxt_ulp_start(bp, rc);
 	if (!rc)
 		bnxt_reenable_sriov(bp);
@@ -16961,6 +16981,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
 	int err;
 
 	netdev_info(bp->dev, "PCI Slot Resume\n");
+	rtnl_lock();
 	netdev_lock(netdev);
 
 	err = bnxt_hwrm_func_qcaps(bp);
@@ -16978,6 +16999,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
 	netif_device_attach(netdev);
 
 	netdev_unlock(netdev);
+	rtnl_unlock();
 	bnxt_ulp_start(bp, err);
 	if (!err)
 		bnxt_reenable_sriov(bp);
@@ -997,22 +997,15 @@ static void macb_update_stats(struct macb *bp)
 
 static int macb_halt_tx(struct macb *bp)
 {
-	unsigned long	halt_time, timeout;
-	u32	status;
+	u32 status;
 
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
 
-	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
-	do {
-		halt_time = jiffies;
-		status = macb_readl(bp, TSR);
-		if (!(status & MACB_BIT(TGO)))
-			return 0;
-
-		udelay(250);
-	} while (time_before(halt_time, timeout));
-
-	return -ETIMEDOUT;
+	/* Poll TSR until TGO is cleared or timeout. */
+	return read_poll_timeout_atomic(macb_readl, status,
+					!(status & MACB_BIT(TGO)),
+					250, MACB_HALT_TIMEOUT, false,
+					bp, TSR);
 }
 
 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
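For readers unfamiliar with the helper: read_poll_timeout_atomic() from
linux/iopoll.h expands to roughly the loop below (a simplified sketch; the
real macro also re-reads the value once more after the deadline before giving
up). The conversion above is intended to be behavior-preserving: poll TSR
every 250 us for up to MACB_HALT_TIMEOUT us, returning 0 once TGO clears and
-ETIMEDOUT otherwise.

	ktime_t deadline = ktime_add_us(ktime_get(), MACB_HALT_TIMEOUT);

	for (;;) {
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT;
		udelay(250);
	}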
@@ -67,6 +67,8 @@
 #define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE	(TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
 #define TSNEP_TX_TYPE_XDP		(TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
 #define TSNEP_TX_TYPE_XSK		BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP		BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP	(TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
 
 #define TSNEP_XDP_TX		BIT(0)
 #define TSNEP_XDP_REDIRECT	BIT(1)
@@ -386,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
 	if (entry->skb) {
 		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
 		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
-		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
-		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+		if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
 			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
 
 		/* toggle user flag to prevent false acknowledge
@@ -479,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
 	return mapped;
 }
 
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+			bool do_tstamp)
 {
 	struct device *dmadev = tx->adapter->dmadev;
 	struct tsnep_tx_entry *entry;
@@ -505,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
 				entry->type = TSNEP_TX_TYPE_SKB_INLINE;
 				mapped = 0;
 			}
+
+			if (do_tstamp)
+				entry->type |= TSNEP_TX_TYPE_TSTAMP;
 		} else {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
@@ -558,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
 static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 					 struct tsnep_tx *tx)
 {
-	int count = 1;
 	struct tsnep_tx_entry *entry;
+	bool do_tstamp = false;
+	int count = 1;
 	int length;
-	int i;
 	int retval;
+	int i;
 
 	if (skb_shinfo(skb)->nr_frags > 0)
 		count += skb_shinfo(skb)->nr_frags;
@@ -579,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 	entry = &tx->entry[tx->write];
 	entry->skb = skb;
 
-	retval = tsnep_tx_map(skb, tx, count);
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+	    (tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		do_tstamp = true;
+	}
+
+	retval = tsnep_tx_map(skb, tx, count, do_tstamp);
 	if (retval < 0) {
 		tsnep_tx_unmap(tx, tx->write, count);
 		dev_kfree_skb_any(entry->skb);
@@ -591,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 	}
 	length = retval;
 
-	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
 	for (i = 0; i < count; i++)
 		tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
 				  i == count - 1);
@@ -844,8 +853,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 
 		length = tsnep_tx_unmap(tx, tx->read, count);
 
-		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
-		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+		if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
 		    (__le32_to_cpu(entry->desc_wb->properties) &
 		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
 			struct skb_shared_hwtstamps hwtstamps;
@@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
 
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
+
+	/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+	if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+		lmac_id = 0;
+
 	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
 	return 0;
 }
@@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
 	if (sw_tx_sc->encrypt)
 		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
 
-	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+			    pfvf->netdev->mtu + OTX2_ETH_HLEN);
 	/* Write SecTag excluding AN bits(1..0) */
 	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
 	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
@@ -356,6 +356,7 @@ struct otx2_flow_config {
 	struct list_head	flow_list_tc;
 	u8			ucast_flt_cnt;
 	bool			ntuple;
+	u16			ntuple_cnt;
 };
 
 struct dev_hw_ops {
@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
 	if (!pfvf->flow_cfg)
 		return 0;
 
+	pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
 	otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
 
 	return 0;
@@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
 	struct otx2_nic *pfvf = netdev_priv(netdev);
 	struct cgx_pause_frm_cfg *req, *rsp;
 
-	if (is_otx2_lbkvf(pfvf->pdev))
+	if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
 		return;
 
 	mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
 	if (pause->autoneg)
 		return -EOPNOTSUPP;
 
-	if (is_otx2_lbkvf(pfvf->pdev))
+	if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
 		return -EOPNOTSUPP;
 
 	if (pause->rx_pause)
@@ -941,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev)
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
 
-	/* LBK link is internal and always UP */
-	if (is_otx2_lbkvf(pfvf->pdev))
+	/* LBK and SDP links are internal and always UP */
+	if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
 		return 1;
 	return pfvf->linfo.link_up;
 }
@@ -1413,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
 
-	if (is_otx2_lbkvf(pfvf->pdev)) {
+	if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
 		cmd->base.duplex = DUPLEX_FULL;
 		cmd->base.speed = SPEED_100000;
 	} else {
@@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
 	mutex_unlock(&pfvf->mbox.lock);
 
 	/* Allocate entries for Ntuple filters */
-	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+	count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
 	if (count <= 0) {
 		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
 		return 0;
@@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
 	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
 
 	pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+	pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
 
 	/* Allocate bare minimum number of MCAM entries needed for
 	 * unicast and ntuple filters.
@@ -4748,7 +4748,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	}
 
 	if (mtk_is_netsys_v3_or_greater(mac->hw) &&
-	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+	    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
 	    id == MTK_GMAC1_ID) {
 		mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
 						       MAC_SYM_PAUSE |
@@ -4349,6 +4349,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
 		netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
 
+	features &= ~NETIF_F_HW_MACSEC;
+	if (netdev->features & NETIF_F_HW_MACSEC)
+		netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
 	return features;
 }
 
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
 		.rif = rif,
 	};
 
+	if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+		return 0;
+
 	neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
 	if (rms.err)
 		goto err_arp;
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
 };
 
 static struct qed_eth_cb_ops qede_ll_ops = {
-	{
+	.common = {
 #ifdef CONFIG_RFS_ACCEL
 		.arfs_filter_op = qede_arfs_filter_op,
 #endif
@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
 	}
 
 	cmd_op = (cmd.rsp.arg[0] & 0xff);
-	if (cmd.rsp.arg[0] >> 25 == 2)
-		return 2;
+	if (cmd.rsp.arg[0] >> 25 == 2) {
+		ret = 2;
+		goto out;
+	}
+
 	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
 		set_bit(QLC_BC_VF_STATE, &vf->state);
 	else
@@ -434,14 +434,20 @@ static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
 	wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
 
 	/* polling reply from FW */
-	err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
-				true, wx, buffer, send_cmd);
+	err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
+				timeout * 1000, true, wx, buffer, send_cmd);
 	if (err) {
 		wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
 		       send_cmd, wx->swfw_index);
 		goto rel_out;
 	}
 
+	if (hdr->cmd_or_resp.ret_status == 0x80) {
+		wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
+		err = -EINVAL;
+		goto rel_out;
+	}
+
 	/* expect no reply from FW then return */
 	if (!return_data)
 		goto rel_out;
@@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
 	}
 	local_buffer = eeprom_ptrs;
 
-	for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+	for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) {
+		if (wx->mac.type == wx_mac_aml) {
+			if (i >= TXGBE_EEPROM_I2C_SRART_PTR &&
+			    i < TXGBE_EEPROM_I2C_END_PTR)
+				local_buffer[i] = 0xffff;
+		}
 		if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
 			*checksum += local_buffer[i];
+	}
 
 	kvfree(eeprom_ptrs);
 
@@ -158,6 +158,8 @@
 #define TXGBE_EEPROM_VERSION_L          0x1D
 #define TXGBE_EEPROM_VERSION_H          0x1E
 #define TXGBE_ISCSI_BOOT_CONFIG         0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR      0x580
+#define TXGBE_EEPROM_I2C_END_PTR        0x800
 
 #define TXGBE_MAX_MSIX_VECTORS          64
 #define TXGBE_MAX_FDIR_INDICES          63
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
 	u8 cp_partial; /* partial copy into send buffer */
 
 	u8 rmsg_size; /* RNDIS header and PPI size */
-	u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
 	u8 page_buf_cnt;
 
 	u16 q_idx;
@@ -893,6 +892,18 @@ struct nvsp_message {
 			      sizeof(struct nvsp_message))
 #define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
 
+/* Maximum # of contiguous data ranges that can make up a trasmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
 /* Estimated requestor size:
  * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
  */
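A worked example of the new clamp (illustrative, not part of the patch): with
the default MAX_SKB_FRAGS of 17, MAX_DATA_RANGES resolves to 17 + 2 = 19
entries; a kernel built with CONFIG_MAX_SKB_FRAGS large enough that
MAX_SKB_FRAGS + 2 reaches MAX_PAGE_BUFFER_COUNT (32) is capped at 32, the
most ranges a single GPA direct packet can carry. A build-time check along
these lines could assert the invariant:

	/* illustrative assertion, not in the patch */
	static_assert(MAX_DATA_RANGES <= MAX_PAGE_BUFFER_COUNT,
		      "page buffer array must fit in a GPA direct packet");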
@@ -953,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		     + pend_size;
 	int i;
 	u32 padding = 0;
-	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
-		packet->page_buf_cnt;
+	u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
 	u32 remain;
 
 	/* Add padding */
@@ -1055,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
 	return 0;
 }
 
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are in HV_HYP_PAGE_SIZE, because all communication with Hyper-V
+ * uses that granularity, even if the system page size of the guest is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+					  u32 page_buffer_count,
+					  struct vmbus_packet_mpb_array *desc,
+					  u32 *desc_size)
+{
+	struct hv_mpb_array *mpb_entry = &desc->range;
+	int i, j;
+
+	for (i = 0; i < page_buffer_count; i++) {
+		u32 offset = pb[i].offset;
+		u32 len = pb[i].len;
+
+		mpb_entry->offset = offset;
+		mpb_entry->len = len;
+
+		for (j = 0; j < HVPFN_UP(offset + len); j++)
+			mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+		mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+	}
+
+	desc->rangecount = page_buffer_count;
+	*desc_size = (char *)mpb_entry - (char *)desc;
+}
+
 static inline int netvsc_send_pkt(
 	struct hv_device *device,
 	struct hv_netvsc_packet *packet,
@@ -1097,8 +1132,11 @@ static inline int netvsc_send_pkt(
 
 	packet->dma_range = NULL;
 	if (packet->page_buf_cnt) {
+		struct vmbus_channel_packet_page_buffer desc;
+		u32 desc_size;
+
 		if (packet->cp_partial)
-			pb += packet->rmsg_pgcnt;
+			pb++;
 
 		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
 		if (ret) {
@@ -1106,11 +1144,12 @@ static inline int netvsc_send_pkt(
 			goto exit;
 		}
 
-		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  pb, packet->page_buf_cnt,
-						  &nvmsg, sizeof(nvmsg),
-						  req_id);
-
+		netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+				       (struct vmbus_packet_mpb_array *)&desc,
+				       &desc_size);
+		ret = vmbus_sendpacket_mpb_desc(out_channel,
+				(struct vmbus_packet_mpb_array *)&desc,
+				desc_size, &nvmsg, sizeof(nvmsg), req_id);
 		if (ret)
 			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
 	} else {
@@ -1259,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
 	packet->send_buf_index = section_index;
 
 	if (packet->cp_partial) {
-		packet->page_buf_cnt -= packet->rmsg_pgcnt;
+		packet->page_buf_cnt--;
 		packet->total_data_buflen = msd_len + packet->rmsg_size;
 	} else {
 		packet->page_buf_cnt = 0;
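To make the PFN expansion concrete, a standalone sketch of the rule
netvsc_build_mpb_array() applies per entry (names prefixed "ex_" are
illustrative; Hyper-V pages are 4 KiB):

	#define EX_HV_PAGE_SIZE	4096u
	#define EX_HVPFN_UP(x)	(((x) + EX_HV_PAGE_SIZE - 1) / EX_HV_PAGE_SIZE)

	/* Number of pfn_array[] slots one contiguous range occupies.
	 * E.g. offset = 0xf00, len = 0x300: EX_HVPFN_UP(0x1200) == 2, so
	 * the entry emits pfn and pfn + 1, and the next variable-size mpb
	 * entry starts right after pfn_array[1].
	 */
	static unsigned int ex_mpb_pfn_slots(unsigned int offset, unsigned int len)
	{
		return EX_HVPFN_UP(offset + len);	/* offset < EX_HV_PAGE_SIZE */
	}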
@@ -326,43 +326,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return txq;
 }
 
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
-		       struct hv_page_buffer *pb)
-{
-	int j = 0;
-
-	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
-	offset = offset & ~HV_HYP_PAGE_MASK;
-
-	while (len > 0) {
-		unsigned long bytes;
-
-		bytes = HV_HYP_PAGE_SIZE - offset;
-		if (bytes > len)
-			bytes = len;
-		pb[j].pfn = hvpfn;
-		pb[j].offset = offset;
-		pb[j].len = bytes;
-
-		offset += bytes;
-		len -= bytes;
-
-		if (offset == HV_HYP_PAGE_SIZE && len) {
-			hvpfn++;
-			offset = 0;
-			j++;
-		}
-	}
-
-	return j + 1;
-}
-
 static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 			   struct hv_netvsc_packet *packet,
 			   struct hv_page_buffer *pb)
 {
-	u32 slots_used = 0;
-	char *data = skb->data;
 	int frags = skb_shinfo(skb)->nr_frags;
 	int i;
 
@@ -371,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 	 * 2. skb linear data
 	 * 3. skb fragment data
 	 */
-	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
-				  offset_in_hvpage(hdr),
-				  len,
-				  &pb[slots_used]);
-
+	pb[0].offset = offset_in_hvpage(hdr);
+	pb[0].len = len;
+	pb[0].pfn = virt_to_hvpfn(hdr);
 	packet->rmsg_size = len;
-	packet->rmsg_pgcnt = slots_used;
 
-	slots_used += fill_pg_buf(virt_to_hvpfn(data),
-				  offset_in_hvpage(data),
-				  skb_headlen(skb),
-				  &pb[slots_used]);
+	pb[1].offset = offset_in_hvpage(skb->data);
+	pb[1].len = skb_headlen(skb);
+	pb[1].pfn = virt_to_hvpfn(skb->data);
 
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+		struct hv_page_buffer *cur_pb = &pb[i + 2];
+		u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+		u32 offset = skb_frag_off(frag);
 
-		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
-					  skb_frag_off(frag),
-					  skb_frag_size(frag),
-					  &pb[slots_used]);
+		cur_pb->offset = offset_in_hvpage(offset);
+		cur_pb->len = skb_frag_size(frag);
+		cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
 	}
-	return slots_used;
+	return frags + 2;
 }
 
 static int count_skb_frag_slots(struct sk_buff *skb)
@@ -483,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
 	struct net_device *vf_netdev;
 	u32 rndis_msg_size;
 	u32 hash;
-	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+	struct hv_page_buffer pb[MAX_DATA_RANGES];
 
 	/* If VF is present and up then redirect packets to it.
 	 * Skip the VF if it is marked down or has no carrier.
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 				  struct rndis_request *req)
 {
 	struct hv_netvsc_packet *packet;
-	struct hv_page_buffer page_buf[2];
-	struct hv_page_buffer *pb = page_buf;
+	struct hv_page_buffer pb;
 	int ret;
 
 	/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	packet->total_data_buflen = req->request_msg.msg_len;
 	packet->page_buf_cnt = 1;
 
-	pb[0].pfn = virt_to_phys(&req->request_msg) >>
-		    HV_HYP_PAGE_SHIFT;
-	pb[0].len = req->request_msg.msg_len;
-	pb[0].offset = offset_in_hvpage(&req->request_msg);
-
-	/* Add one page_buf when request_msg crossing page boundary */
-	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
-		packet->page_buf_cnt++;
-		pb[0].len = HV_HYP_PAGE_SIZE -
-			pb[0].offset;
-		pb[1].pfn = virt_to_phys((void *)&req->request_msg
-			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
-		pb[1].offset = 0;
-		pb[1].len = req->request_msg.msg_len -
-			pb[0].len;
-	}
+	pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+	pb.len = req->request_msg.msg_len;
+	pb.offset = offset_in_hvpage(&req->request_msg);
 
 	trace_rndis_send(dev->ndev, 0, &req->request_msg);
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+	ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
 	rcu_read_unlock_bh();
 
 	return ret;
@@ -2027,12 +2027,6 @@ static int ksz9477_config_init(struct phy_device *phydev)
 			return err;
 	}
 
-	/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
-	 * in this switch shall be regarded as broken.
-	 */
-	if (phydev->dev_flags & MICREL_NO_EEE)
-		phy_disable_eee(phydev);
-
 	return kszphy_config_init(phydev);
 }
 
@@ -5705,7 +5699,6 @@ static struct phy_driver ksphy_driver[] = {
 	.handle_interrupt = kszphy_handle_interrupt,
 	.suspend	= genphy_suspend,
 	.resume		= ksz9477_resume,
-	.get_features	= ksz9477_get_features,
 } };
 
 module_phy_driver(ksphy_driver);
@@ -1011,6 +1011,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
 	int i;
 
 	mt76_worker_disable(&dev->tx_worker);
+	napi_disable(&dev->tx_napi);
 	netif_napi_del(&dev->tx_napi);
 
 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
@@ -1924,14 +1924,14 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
 			mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
 			mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
 		}
-
-		mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
 	}
 
 	if (!info->enable) {
 		mt7925_mcu_sta_remove_tlv(skb);
 		mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
 					sizeof(struct tlv));
+	} else {
+		mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
 	}
 
 	return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 			return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
 
+		payload->rangecount = 1;
 		payload->range.len = length;
 		payload->range.offset = offset_in_hvpg;
 
@@ -1167,13 +1167,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
 				  enum vmbus_packet_type type,
 				  u32 flags);
 
-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
-					    struct hv_page_buffer pagebuffers[],
-					    u32 pagecount,
-					    void *buffer,
-					    u32 bufferlen,
-					    u64 requestid);
-
 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 				     struct vmbus_packet_mpb_array *mpb,
 				     u32 desc_size,
@@ -44,7 +44,6 @@
 #define MICREL_PHY_50MHZ_CLK	BIT(0)
 #define MICREL_PHY_FXEN		BIT(1)
 #define MICREL_KSZ8_P1_ERRATA	BIT(2)
-#define MICREL_NO_EEE		BIT(3)
 
 #define MICREL_KSZ9021_EXTREG_CTRL	0xB
 #define MICREL_KSZ9021_EXTREG_DATA_WRITE	0xC
@@ -1798,6 +1798,7 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 void hci_uuids_clear(struct hci_dev *hdev);
 
 void hci_link_keys_clear(struct hci_dev *hdev);
+u8 *hci_conn_key_enc_size(struct hci_conn *conn);
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
 				  bdaddr_t *bdaddr, u8 *val, u8 type,
@@ -1031,6 +1031,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
 	return skb;
 }
 
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+	struct sk_buff *skb;
+
+	skb = __skb_dequeue(&sch->gso_skb);
+	if (skb) {
+		sch->q.qlen--;
+		return skb;
+	}
+	if (direct)
+		return __qdisc_dequeue_head(&sch->q);
+	else
+		return sch->dequeue(sch);
+}
+
 static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 {
 	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
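The helper exists so qdiscs can flush packets parked on sch->gso_skb as well
as the normal queue when ->change() shrinks a limit - skipping gso_skb was
the source of the null-deref this pull fixes. A hedged sketch of the intended
call pattern (new_limit is illustrative; pass true when the qdisc stores
packets directly in sch->q):

	unsigned int dropped_pkts = 0, dropped_bytes = 0;

	while (sch->q.qlen > new_limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;
		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);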
@@ -506,28 +506,32 @@ batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
 	return false;
 }
 
-static void batadv_check_known_mac_addr(const struct net_device *net_dev)
+static void batadv_check_known_mac_addr(const struct batadv_hard_iface *hard_iface)
 {
-	const struct batadv_hard_iface *hard_iface;
+	const struct net_device *mesh_iface = hard_iface->mesh_iface;
+	const struct batadv_hard_iface *tmp_hard_iface;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
-		if (hard_iface->if_status != BATADV_IF_ACTIVE &&
-		    hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
+	if (!mesh_iface)
+		return;
+
+	list_for_each_entry(tmp_hard_iface, &batadv_hardif_list, list) {
+		if (tmp_hard_iface == hard_iface)
+			continue;
+
+		if (tmp_hard_iface->mesh_iface != mesh_iface)
 			continue;
 
-		if (hard_iface->net_dev == net_dev)
+		if (tmp_hard_iface->if_status == BATADV_IF_NOT_IN_USE)
 			continue;
 
-		if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
-					net_dev->dev_addr))
+		if (!batadv_compare_eth(tmp_hard_iface->net_dev->dev_addr,
+					hard_iface->net_dev->dev_addr))
 			continue;
 
 		pr_warn("The newly added mac address (%pM) already exists on: %s\n",
-			net_dev->dev_addr, hard_iface->net_dev->name);
+			hard_iface->net_dev->dev_addr, tmp_hard_iface->net_dev->name);
 		pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
 	}
-	rcu_read_unlock();
 }
 
 /**
@@ -763,6 +767,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 			    hard_iface->net_dev->name, hardif_mtu,
 			    required_mtu);
 
+	batadv_check_known_mac_addr(hard_iface);
+
 	if (batadv_hardif_is_iface_up(hard_iface))
 		batadv_hardif_activate_interface(hard_iface);
 	else
@@ -901,7 +907,6 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 
 	batadv_v_hardif_init(hard_iface);
 
-	batadv_check_known_mac_addr(hard_iface->net_dev);
 	kref_get(&hard_iface->refcount);
 	list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
 	batadv_hardif_generation++;
@@ -988,7 +993,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
 		if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
 			goto hardif_put;
 
-		batadv_check_known_mac_addr(hard_iface->net_dev);
+		batadv_check_known_mac_addr(hard_iface);
 
 		bat_priv = netdev_priv(hard_iface->mesh_iface);
 		bat_priv->algo_ops->iface.update_mac(hard_iface);
@@ -3023,3 +3023,27 @@ void hci_conn_tx_dequeue(struct hci_conn *conn)
 
 	kfree_skb(skb);
 }
+
+u8 *hci_conn_key_enc_size(struct hci_conn *conn)
+{
+	if (conn->type == ACL_LINK) {
+		struct link_key *key;
+
+		key = hci_find_link_key(conn->hdev, &conn->dst);
+		if (!key)
+			return NULL;
+
+		return &key->pin_len;
+	} else if (conn->type == LE_LINK) {
+		struct smp_ltk *ltk;
+
+		ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type,
+				   conn->role);
+		if (!ltk)
+			return NULL;
+
+		return &ltk->enc_size;
+	}
+
+	return NULL;
+}
@ -739,10 +739,17 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
|
|||
handle);
|
||||
conn->enc_key_size = 0;
|
||||
} else {
|
||||
u8 *key_enc_size = hci_conn_key_enc_size(conn);
|
||||
|
||||
conn->enc_key_size = rp->key_size;
|
||||
status = 0;
|
||||
|
||||
if (conn->enc_key_size < hdev->min_enc_key_size) {
|
||||
/* Attempt to check if the key size is too small or if it has
|
||||
* been downgraded from the last time it was stored as part of
|
||||
* the link_key.
|
||||
*/
|
||||
if (conn->enc_key_size < hdev->min_enc_key_size ||
|
||||
(key_enc_size && conn->enc_key_size < *key_enc_size)) {
|
||||
/* As slave role, the conn->state has been set to
|
||||
* BT_CONNECTED and l2cap conn req might not be received
|
||||
* yet, at this moment the l2cap layer almost does
|
||||
|
@ -755,6 +762,10 @@ static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
|
|||
clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
|
||||
clear_bit(HCI_CONN_AES_CCM, &conn->flags);
|
||||
}
|
||||
|
||||
/* Update the key encryption size with the connection one */
|
||||
if (key_enc_size && *key_enc_size != conn->enc_key_size)
|
||||
*key_enc_size = conn->enc_key_size;
|
||||
}
|
||||
|
||||
hci_encrypt_cfm(conn, status);
|
||||
|
@ -3065,6 +3076,34 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
|
|||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int hci_read_enc_key_size(struct hci_dev *hdev, struct hci_conn *conn)
|
||||
{
|
||||
struct hci_cp_read_enc_key_size cp;
|
||||
u8 *key_enc_size = hci_conn_key_enc_size(conn);
|
||||
|
||||
if (!read_key_size_capable(hdev)) {
|
||||
conn->enc_key_size = HCI_LINK_KEY_SIZE;
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
bt_dev_dbg(hdev, "hcon %p", conn);
|
||||
|
||||
memset(&cp, 0, sizeof(cp));
|
||||
cp.handle = cpu_to_le16(conn->handle);
|
||||
|
||||
/* If the key enc_size is already known, use it as conn->enc_key_size,
|
||||
* otherwise use hdev->min_enc_key_size so the likes of
|
||||
* l2cap_check_enc_key_size don't fail while waiting for
|
||||
* HCI_OP_READ_ENC_KEY_SIZE response.
|
||||
*/
|
||||
if (key_enc_size && *key_enc_size)
|
||||
conn->enc_key_size = *key_enc_size;
|
||||
else
|
||||
conn->enc_key_size = hdev->min_enc_key_size;
|
||||
|
||||
return hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
|
@@ -3157,23 +3196,11 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 	if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
 	    ev->link_type == ACL_LINK) {
 		struct link_key *key;
-		struct hci_cp_read_enc_key_size cp;
 
 		key = hci_find_link_key(hdev, &ev->bdaddr);
 		if (key) {
 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
-
-			if (!read_key_size_capable(hdev)) {
-				conn->enc_key_size = HCI_LINK_KEY_SIZE;
-			} else {
-				cp.handle = cpu_to_le16(conn->handle);
-				if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
-						 sizeof(cp), &cp)) {
-					bt_dev_err(hdev, "sending read key size failed");
-					conn->enc_key_size = HCI_LINK_KEY_SIZE;
-				}
-			}
-
+			hci_read_enc_key_size(hdev, conn);
 			hci_encrypt_cfm(conn, ev->status);
 		}
 	}

@@ -3612,24 +3639,8 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
 
 	/* Try reading the encryption key size for encrypted ACL links */
 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
-		struct hci_cp_read_enc_key_size cp;
-
-		/* Only send HCI_Read_Encryption_Key_Size if the
-		 * controller really supports it. If it doesn't, assume
-		 * the default size (16).
-		 */
-		if (!read_key_size_capable(hdev)) {
-			conn->enc_key_size = HCI_LINK_KEY_SIZE;
+		if (hci_read_enc_key_size(hdev, conn))
 			goto notify;
-		}
-
-		cp.handle = cpu_to_le16(conn->handle);
-		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
-				 sizeof(cp), &cp)) {
-			bt_dev_err(hdev, "sending read key size failed");
-			conn->enc_key_size = HCI_LINK_KEY_SIZE;
-			goto notify;
-		}
 
 		goto unlock;
 	}

@@ -7506,11 +7506,16 @@ static void add_device_complete(struct hci_dev *hdev, void *data, int err)
 	struct mgmt_cp_add_device *cp = cmd->param;
 
 	if (!err) {
+		struct hci_conn_params *params;
+
+		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+						le_addr_type(cp->addr.type));
+
 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
 			     cp->action);
 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
 				     cp->addr.type, hdev->conn_flags,
-				     PTR_UINT(cmd->user_data));
+				     params ? params->flags : 0);
 	}
 
 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,

@@ -7613,8 +7618,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
 		goto unlock;
 	}
 
-	cmd->user_data = UINT_PTR(current_flags);
-
 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
 				 add_device_complete);
 	if (err < 0) {

@@ -10441,6 +10441,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
 		if (!(features & feature) && (lower->features & feature)) {
 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
 				   &feature, lower->name);
+			netdev_lock_ops(lower);
 			lower->wanted_features &= ~feature;
 			__netdev_update_features(lower);
 
@@ -10449,6 +10450,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
 					    &feature, lower->name);
 			else
 				netdev_features_change(lower);
+			netdev_unlock_ops(lower);
 		}
 	}
 }

@@ -200,6 +200,8 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
 
 	refcount_set(&binding->ref, 1);
 
+	mutex_init(&binding->lock);
+
 	binding->dmabuf = dmabuf;
 
 	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);

@@ -379,6 +381,11 @@ static void mp_dmabuf_devmem_uninstall(void *mp_priv,
 	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
 		if (bound_rxq == rxq) {
 			xa_erase(&binding->bound_rxqs, xa_idx);
+			if (xa_empty(&binding->bound_rxqs)) {
+				mutex_lock(&binding->lock);
+				binding->dev = NULL;
+				mutex_unlock(&binding->lock);
+			}
 			break;
 		}
 	}

@@ -20,6 +20,8 @@ struct net_devmem_dmabuf_binding {
 	struct sg_table *sgt;
 	struct net_device *dev;
 	struct gen_pool *chunk_pool;
+	/* Protect dev */
+	struct mutex lock;
 
 	/* The user holds a ref (via the netlink API) for as long as they want
 	 * the binding to remain alive. Each page pool using this binding holds

@@ -979,14 +979,25 @@ void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
 {
 	struct net_devmem_dmabuf_binding *binding;
 	struct net_devmem_dmabuf_binding *temp;
+	netdevice_tracker dev_tracker;
 	struct net_device *dev;
 
 	mutex_lock(&priv->lock);
 	list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
+		mutex_lock(&binding->lock);
 		dev = binding->dev;
+		if (!dev) {
+			mutex_unlock(&binding->lock);
+			net_devmem_unbind_dmabuf(binding);
+			continue;
+		}
+		netdev_hold(dev, &dev_tracker, GFP_KERNEL);
+		mutex_unlock(&binding->lock);
+
 		netdev_lock(dev);
 		net_devmem_unbind_dmabuf(binding);
 		netdev_unlock(dev);
+		netdev_put(dev, &dev_tracker);
 	}
 	mutex_unlock(&priv->lock);
 }

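The destroy path above follows a common teardown shape: hold binding->lock only long enough to snapshot binding->dev and take a reference, then run the expensive unbind outside the lock. A self-contained pthread sketch of that snapshot-then-hold idea, with invented names standing in for the kernel objects:

    #include <pthread.h>
    #include <stdio.h>

    /* Invented stand-in for a binding whose ->dev may be cleared
     * concurrently by the uninstall path. */
    struct binding {
            pthread_mutex_t lock;
            int *dev;    /* NULL once the device is gone */
            int refs;    /* crude stand-in for netdev_hold()/netdev_put() */
    };

    static void destroy(struct binding *b)
    {
            int *dev;

            pthread_mutex_lock(&b->lock);
            dev = b->dev;            /* snapshot under the lock */
            if (!dev) {
                    pthread_mutex_unlock(&b->lock);
                    printf("device already gone, unbind without it\n");
                    return;
            }
            b->refs++;               /* keep the device alive past the unlock */
            pthread_mutex_unlock(&b->lock);

            printf("unbinding against dev %d\n", *dev);  /* safe: ref held */
            b->refs--;
    }

    int main(void)
    {
            int dev = 42;
            struct binding b = { PTHREAD_MUTEX_INITIALIZER, &dev, 0 };

            destroy(&b);
            b.dev = NULL;   /* simulate the module-unload race */
            destroy(&b);
            return 0;
    }
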
@@ -1354,10 +1354,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
 
 
-	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
-				      sizeof(void *) * channels, GFP_KERNEL);
+	local->int_scan_req = kzalloc(struct_size(local->int_scan_req,
+						  channels, channels),
+				      GFP_KERNEL);
 	if (!local->int_scan_req)
 		return -ENOMEM;
+	local->int_scan_req->n_channels = channels;
 
 	eth_broadcast_addr(local->int_scan_req->bssid);
 

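The mac80211 change replaces a hand-rolled header-plus-array size with struct_size(), the kernel helper that sizes a struct ending in a flexible array member and saturates on overflow rather than wrapping. The equivalent arithmetic in plain C (without the kernel's overflow saturation):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct scan_request {
            int n_channels;
            void *channels[];  /* flexible array member */
    };

    int main(void)
    {
            int channels = 14;
            /* What kzalloc(struct_size(req, channels, channels)) computes:
             * offset of the flexible array plus n elements, with the element
             * size taken from the member itself so it cannot go stale. */
            struct scan_request *req =
                    calloc(1, offsetof(struct scan_request, channels) +
                              sizeof(req->channels[0]) * channels);

            if (!req)
                    return 1;
            req->n_channels = channels;  /* the fix: set it after allocating */
            printf("request sized for %d channels\n", req->n_channels);
            free(req);
            return 0;
    }
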
@@ -117,11 +117,18 @@ static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	struct ifaddrmsg *hdr;
 	struct mctp_dev *mdev;
-	int ifindex, rc;
+	int ifindex = 0, rc;
 
-	hdr = nlmsg_data(cb->nlh);
-	// filter by ifindex if requested
-	ifindex = hdr->ifa_index;
+	/* Filter by ifindex if a header is provided */
+	if (cb->nlh->nlmsg_len >= nlmsg_msg_size(sizeof(*hdr))) {
+		hdr = nlmsg_data(cb->nlh);
+		ifindex = hdr->ifa_index;
+	} else {
+		if (cb->strict_check) {
+			NL_SET_ERR_MSG(cb->extack, "mctp: Invalid header for addr dump request");
+			return -EINVAL;
+		}
+	}
 
 	rcu_read_lock();
 	for_each_netdev_dump(net, dev, mcb->ifindex) {

@@ -313,8 +313,10 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
 
 	key = flow->key;
 
-	if (WARN_ON(key->dev && key->dev != dev))
+	if (key->dev) {
+		WARN_ON(key->dev != dev);
 		return;
+	}
 
 	mctp_dev_set_key(dev, key);
 }

@@ -144,7 +144,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
 
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
 		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);

@@ -1136,7 +1136,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
 		sch_tree_lock(sch);
 	}
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = fq_dequeue(sch);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
 		if (!skb)
 			break;

@@ -441,7 +441,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 
 	while (sch->q.qlen > sch->limit ||
 	       q->memory_usage > q->memory_limit) {
-		struct sk_buff *skb = fq_codel_dequeue(sch);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
 		q->cstats.drop_len += qdisc_pkt_len(skb);
 		rtnl_kfree_skbs(skb, skb);

@@ -366,7 +366,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 
 	/* Drop excess packets if new limit is lower */
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
 		len_dropped += qdisc_pkt_len(skb);
 		num_dropped += 1;

@@ -564,7 +564,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
 	qlen = sch->q.qlen;
 	prev_backlog = sch->qstats.backlog;
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = hhf_dequeue(sch);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
 
 		rtnl_kfree_skbs(skb, skb);
 	}

@@ -195,7 +195,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
 	/* Drop excess packets if new limit is lower */
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > sch->limit) {
-		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
 
 		dropped += qdisc_pkt_len(skb);
 		qdisc_qstats_backlog_dec(sch, skb);

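All six qdisc fixes above make the same substitution in the ->change() shrink loop: instead of dequeuing straight from the backing queue, they go through qdisc_dequeue_internal(), which first drains any packets parked on the qdisc's gso_skb requeue list (the list the old loops missed, which led to the reported null-deref on reconfig). A hedged sketch of what such a helper has to do, assuming the kernel's field names; see include/net/sch_generic.h for the authoritative version:

    static struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
    {
            struct sk_buff *skb;

            /* Requeued (e.g. GSO-segmented) packets live on gso_skb and
             * must be flushed before touching the real queue. */
            skb = __skb_dequeue(&sch->gso_skb);
            if (skb) {
                    sch->q.qlen--;
                    return skb;
            }
            /* codel/pie use the simple sch->q list (direct == true);
             * fq, fq_codel, fq_pie and hhf need their own ->dequeue(). */
            if (direct)
                    return __qdisc_dequeue_head(&sch->q);
            return sch->dequeue(sch);
    }
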
@@ -396,7 +396,6 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
 		return 0;
 
 	shinfo = skb_shinfo(strp->anchor);
-	shinfo->frag_list = NULL;
 
 	/* If we don't know the length go max plus page for cipher overhead */
 	need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

@@ -412,6 +411,8 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
 			   page, 0, 0);
 	}
 
+	shinfo->frag_list = NULL;
+
 	strp->copy_mode = 1;
 	strp->stm.offset = 0;

@@ -338,16 +338,24 @@ def main():
         print('Capabilities:')
         [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
 
-        print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
+        print(f'PTP Hardware Clock: {tsinfo.get("phc-index", "none")}')
 
-        print('Hardware Transmit Timestamp Modes:')
-        [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+        if 'tx-types' in tsinfo:
+            print('Hardware Transmit Timestamp Modes:')
+            [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+        else:
+            print('Hardware Transmit Timestamp Modes: none')
 
-        print('Hardware Receive Filter Modes:')
-        [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+        if 'rx-filters' in tsinfo:
+            print('Hardware Receive Filter Modes:')
+            [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+        else:
+            print('Hardware Receive Filter Modes: none')
 
-        print('Statistics:')
-        [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
+        if 'stats' in tsinfo and tsinfo['stats']:
+            print('Statistics:')
+            [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
 
         return
 
     print(f'Settings for {args.device}:')

@@ -1143,10 +1143,9 @@ class Family(SpecFamily):
                     self.pure_nested_structs[nested].request = True
                 if attr in rs_members['reply']:
                     self.pure_nested_structs[nested].reply = True
-
-            if spec.is_multi_val():
-                child = self.pure_nested_structs.get(nested)
-                child.in_multi_val = True
+                if spec.is_multi_val():
+                    child = self.pure_nested_structs.get(nested)
+                    child.in_multi_val = True
 
         self._sort_pure_types()

@@ -431,6 +431,22 @@ static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
 	return 0;
 }
 
+static struct netdev_queue_id *create_queues(void)
+{
+	struct netdev_queue_id *queues;
+	size_t i = 0;
+
+	queues = calloc(num_queues, sizeof(*queues));
+	for (i = 0; i < num_queues; i++) {
+		queues[i]._present.type = 1;
+		queues[i]._present.id = 1;
+		queues[i].type = NETDEV_QUEUE_TYPE_RX;
+		queues[i].id = start_queue + i;
+	}
+
+	return queues;
+}
+
 int do_server(struct memory_buffer *mem)
 {
 	char ctrl_data[sizeof(int) * 20000];

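One nit the refactor above keeps from the open-coded version: create_queues() never checks the calloc() result. A slightly more defensive variant (hypothetical, not part of the patch), reusing the selftest's existing num_queues/start_queue globals and its error() convention:

    static struct netdev_queue_id *create_queues_checked(void)
    {
            struct netdev_queue_id *queues;
            size_t i;

            queues = calloc(num_queues, sizeof(*queues));
            if (!queues)
                    error(1, 0, "Failed to allocate queues array\n");

            for (i = 0; i < num_queues; i++) {
                    queues[i]._present.type = 1;
                    queues[i]._present.id = 1;
                    queues[i].type = NETDEV_QUEUE_TYPE_RX;
                    queues[i].id = start_queue + i;
            }
            return queues;
    }
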
@@ -448,7 +464,6 @@ int do_server(struct memory_buffer *mem)
 	char buffer[256];
 	int socket_fd;
 	int client_fd;
-	size_t i = 0;
 	int ret;
 
 	ret = parse_address(server_ip, atoi(port), &server_sin);

@@ -471,16 +486,7 @@ int do_server(struct memory_buffer *mem)
 
 	sleep(1);
 
-	queues = malloc(sizeof(*queues) * num_queues);
-
-	for (i = 0; i < num_queues; i++) {
-		queues[i]._present.type = 1;
-		queues[i]._present.id = 1;
-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
-		queues[i].id = start_queue + i;
-	}
-
-	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
 		error(1, 0, "Failed to bind\n");
 
 	tmp_mem = malloc(mem->size);

@@ -545,7 +551,6 @@ int do_server(struct memory_buffer *mem)
 			goto cleanup;
 		}
 
-		i++;
 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 			if (cm->cmsg_level != SOL_SOCKET ||
 			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&

@@ -630,10 +635,8 @@ cleanup:
 
 void run_devmem_tests(void)
 {
-	struct netdev_queue_id *queues;
 	struct memory_buffer *mem;
 	struct ynl_sock *ys;
-	size_t i = 0;
 
 	mem = provider->alloc(getpagesize() * NUM_PAGES);
 

@@ -641,38 +644,24 @@ void run_devmem_tests(void)
 	if (configure_rss())
 		error(1, 0, "rss error\n");
 
-	queues = calloc(num_queues, sizeof(*queues));
-
 	if (configure_headersplit(1))
 		error(1, 0, "Failed to configure header split\n");
 
-	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+	if (!bind_rx_queue(ifindex, mem->fd,
+			   calloc(num_queues, sizeof(struct netdev_queue_id)),
+			   num_queues, &ys))
 		error(1, 0, "Binding empty queues array should have failed\n");
 
-	for (i = 0; i < num_queues; i++) {
-		queues[i]._present.type = 1;
-		queues[i]._present.id = 1;
-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
-		queues[i].id = start_queue + i;
-	}
-
 	if (configure_headersplit(0))
 		error(1, 0, "Failed to configure header split\n");
 
-	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+	if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
 		error(1, 0, "Configure dmabuf with header split off should have failed\n");
 
 	if (configure_headersplit(1))
 		error(1, 0, "Failed to configure header split\n");
 
-	for (i = 0; i < num_queues; i++) {
-		queues[i]._present.type = 1;
-		queues[i]._present.id = 1;
-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
-		queues[i].id = start_queue + i;
-	}
-
-	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
 		error(1, 0, "Failed to bind\n");
 
 	/* Deactivating a bound queue should not be legal */

@@ -189,5 +189,29 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY handle 1: root"
         ]
-    }
+    },
+    {
+        "id": "deb1",
+        "name": "CODEL test qdisc limit trimming",
+        "category": ["qdisc", "codel"],
+        "plugins": {
+            "requires": ["nsPlugin", "scapyPlugin"]
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root codel limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root codel limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc codel 1: root refcnt [0-9]+ limit 1p target 5ms interval 100ms",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
 ]

@@ -377,5 +377,27 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY handle 1: root"
         ]
-    }
+    },
+    {
+        "id": "9479",
+        "name": "FQ test qdisc limit trimming",
+        "category": ["qdisc", "fq"],
+        "plugins": {"requires": ["nsPlugin", "scapyPlugin"]},
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root fq limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root fq limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 1p",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
 ]

@@ -294,5 +294,27 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY handle 1: root"
         ]
-    }
+    },
+    {
+        "id": "0436",
+        "name": "FQ_CODEL test qdisc limit trimming",
+        "category": ["qdisc", "fq_codel"],
+        "plugins": {"requires": ["nsPlugin", "scapyPlugin"]},
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root fq_codel limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root fq_codel limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1p flows 1024 quantum.*target 5ms interval 100ms memory_limit 32Mb ecn drop_batch 64",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
 ]

@@ -18,5 +18,27 @@
         "matchCount": "1",
         "teardown": [
         ]
-    }
+    },
+    {
+        "id": "83bf",
+        "name": "FQ_PIE test qdisc limit trimming",
+        "category": ["qdisc", "fq_pie"],
+        "plugins": {"requires": ["nsPlugin", "scapyPlugin"]},
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root fq_pie limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root fq_pie limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc fq_pie 1: root refcnt [0-9]+ limit 1p",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
 ]

@@ -188,5 +188,27 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY handle 1: root"
         ]
-    }
+    },
+    {
+        "id": "385f",
+        "name": "HHF test qdisc limit trimming",
+        "category": ["qdisc", "hhf"],
+        "plugins": {"requires": ["nsPlugin", "scapyPlugin"]},
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root hhf limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root hhf limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc hhf 1: root refcnt [0-9]+ limit 1p.*hh_limit 2048 reset_timeout 40ms admit_bytes 128Kb evict_timeout 1s non_hh_weight 2",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
 ]

tools/testing/selftests/tc-testing/tc-tests/qdiscs/pie.json (new file, 24 lines)

@@ -0,0 +1,24 @@
+[
+    {
+        "id": "6158",
+        "name": "PIE test qdisc limit trimming",
+        "category": ["qdisc", "pie"],
+        "plugins": {"requires": ["nsPlugin", "scapyPlugin"]},
+        "setup": [
+            "$TC qdisc add dev $DEV1 handle 1: root pie limit 10"
+        ],
+        "scapy": [
+            {
+                "iface": "$DEV0",
+                "count": 10,
+                "packet": "Ether(type=0x800)/IP(src='10.0.0.10',dst='10.0.0.20')/TCP(sport=5000,dport=10)"
+            }
+        ],
+        "cmdUnderTest": "$TC qdisc change dev $DEV1 handle 1: root pie limit 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC qdisc show dev $DEV1",
+        "matchPattern": "qdisc pie 1: root refcnt [0-9]+ limit 1p",
+        "matchCount": "1",
+        "teardown": ["$TC qdisc del dev $DEV1 handle 1: root"]
+    }
+]

@@ -1264,21 +1264,25 @@ static void test_unsent_bytes_client(const struct test_opts *opts, int type)
 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
 	control_expectln("RECEIVED");
 
-	ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
-	if (ret < 0) {
-		if (errno == EOPNOTSUPP) {
-			fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
-		} else {
+	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
+	 * the "RECEIVED" message means that the other side has received the
+	 * data, there can be a delay in our kernel before updating the "unsent
+	 * bytes" counter. Repeat SIOCOUTQ until it returns 0.
+	 */
+	timeout_begin(TIMEOUT);
+	do {
+		ret = ioctl(fd, SIOCOUTQ, &sock_bytes_unsent);
+		if (ret < 0) {
+			if (errno == EOPNOTSUPP) {
+				fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
+				break;
+			}
 			perror("ioctl");
 			exit(EXIT_FAILURE);
 		}
-	} else if (ret == 0 && sock_bytes_unsent != 0) {
-		fprintf(stderr,
-			"Unexpected 'SIOCOUTQ' value, expected 0, got %i\n",
-			sock_bytes_unsent);
-		exit(EXIT_FAILURE);
-	}
 
+		timeout_check("SIOCOUTQ");
+	} while (sock_bytes_unsent != 0);
+	timeout_end();
 	close(fd);
 }

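The vsock change turns a one-shot SIOCOUTQ read into a bounded poll, since the unsent-bytes counter can lag the peer's "RECEIVED" acknowledgment; timeout_begin()/timeout_check()/timeout_end() are the suite's own deadline macros. A self-contained approximation of the same bounded-poll shape:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>
    #include <unistd.h>

    /* Poll until read_counter() hits zero or 'secs' elapse; an illustrative
     * stand-in for the vsock tests' timeout_begin/timeout_check macros. */
    static int poll_until_zero(int (*read_counter)(void), int secs)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if (read_counter() == 0)
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec - start.tv_sec >= secs) {
                            fprintf(stderr, "timed out waiting for counter\n");
                            return -1;
                    }
                    usleep(1000);  /* brief back-off between reads */
            }
    }

    static int fake_counter(void)
    {
            static int remaining = 3;  /* pretend bytes drain over time */
            return remaining ? remaining-- : 0;
    }

    int main(void)
    {
            return poll_until_zero(fake_counter, 10) ? EXIT_FAILURE : EXIT_SUCCESS;
    }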