# SPDX-License-Identifier: GPL-2.0

selftests: drv-net: wait for iperf client to stop sending

A few packets may still be sent out during the termination of iperf
processes. These late packets cause failures in rss_ctx.py when they
arrive on queues expected to be empty. Example failure observed:

    Check failed 2 != 0 traffic on inactive queues (context 1):
    [0, 0, 1, 1, 386385, 397196, 0, 0, 0, 0, ...]
    Check failed 4 != 0 traffic on inactive queues (context 2):
    [0, 0, 0, 0, 2, 2, 247152, 253013, 0, 0, ...]
    Check failed 2 != 0 traffic on inactive queues (context 3):
    [0, 0, 0, 0, 0, 0, 1, 1, 282434, 283070, ...]

To avoid such failures, wait until all client sockets for the requested
port are either closed or in the TIME_WAIT state.

Fixes: 847aa551fa78 ("selftests: drv-net: rss_ctx: factor out send traffic and check")
Signed-off-by: Nimrod Oren <noren@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250722122655.3194442-1-noren@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
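The wait keys off the socket state column in /proc/net/tcp on the remote host:
the port shows up in hexadecimal, and state 06 means TIME_WAIT, so any line that
pairs the port with a different state belongs to a socket that may still transmit.
A minimal sketch of that check (the sample lines and port 8080 are hypothetical,
for illustration only):

    import re

    # Hypothetical /proc/net/tcp excerpt: two client sockets whose remote end
    # is port 0x1F90 (8080), one ESTABLISHED (st 01), one in TIME_WAIT (st 06).
    sample = (
        "   1: 0100007F:A3D2 0100007F:1F90 01 00000000:00000000 00:00000000 00000000\n"
        "   2: 0100007F:A3D4 0100007F:1F90 06 00000000:00000000 00:00000000 00000000\n"
    )

    port = 8080
    # Port in hex followed by a state field other than 06 means "still live".
    live = re.compile(fr":{port:04X} 0[^6] ")
    print(bool(live.search(sample)))  # True: the ESTABLISHED socket is still live

_wait_client_stopped() in the helper below polls with exactly this pattern until
no such line remains or a timeout expires.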
import re
import time

from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen


class GenerateTraffic:
    def __init__(self, env, port=None):
        env.require_cmd("iperf3", local=True, remote=True)

        self.env = env
        # Pick a port (random unless given) and start a one-shot iperf3 server
        # locally ("-1" makes it exit after serving a single client).
        self.port = rand_port() if port is None else port
        self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True)
        wait_port_listen(self.port)
        time.sleep(0.1)
        # Launch a long-running (24 h), 16-stream iperf3 client on the remote host.
        self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400",
                                 background=True, host=env.remote)

        # Wait for traffic to ramp up
        if not self._wait_pkts(pps=1000):
            self.stop(verbose=True)
            raise Exception("iperf3 traffic did not ramp up")

    def _wait_pkts(self, pkt_cnt=None, pps=None):
        """
        Wait until we've seen pkt_cnt or until traffic ramps up to pps.
        Only one of pkt_cnt or pps can be specified.
        """
        pkt_start = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
        for _ in range(50):
            time.sleep(0.1)
            pkt_now = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
            if pps:
                # pps / 10 packets over the 0.1 s sleep corresponds to the requested rate
                if pkt_now - pkt_start > pps / 10:
                    return True
                pkt_start = pkt_now
            elif pkt_cnt:
                if pkt_now - pkt_start > pkt_cnt:
                    return True
        return False

    def wait_pkts_and_stop(self, pkt_cnt):
        failed = not self._wait_pkts(pkt_cnt=pkt_cnt)
        self.stop(verbose=failed)

    def stop(self, verbose=None):
        self._iperf_client.process(terminate=True)
        if verbose:
            ksft_pr(">> Client:")
            ksft_pr(self._iperf_client.stdout)
            ksft_pr(self._iperf_client.stderr)
        self._iperf_server.process(terminate=True)
        if verbose:
            ksft_pr(">> Server:")
            ksft_pr(self._iperf_server.stdout)
            ksft_pr(self._iperf_server.stderr)
        # Late packets from the dying client can land on queues the test expects
        # to be idle, so wait for the client sockets to drain.
        self._wait_client_stopped()

    def _wait_client_stopped(self, sleep=0.005, timeout=5):
        end = time.monotonic() + timeout

        # The port in hex followed by any socket state other than 06 (TIME_WAIT)
        # means a client socket for this port is still live.
        live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ")

        while time.monotonic() < end:
            data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
            if not live_port_pattern.search(data):
                return
            time.sleep(sleep)

        raise Exception(f"Waiting for client to stop timed out after {timeout}s")
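For context, a rough sketch of how a test might drive this helper; the env object
and the packet count are hypothetical stand-ins, not part of this module:

    # Hypothetical usage in a drv-net style test.  `env` stands in for the
    # environment object the test harness normally provides.
    tgen = GenerateTraffic(env)       # local one-shot server + remote 16-stream client
    # ... sample per-queue rx counters while traffic is flowing ...
    tgen.wait_pkts_and_stop(20000)    # wait for ~20k packets to arrive, then stop();
                                      # stop() also waits until the client's sockets
                                      # are closed or in TIME_WAIT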