/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC tracepoints
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rxrpc

#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RXRPC_H

#include <linux/tracepoint.h>
#include <linux/errqueue.h>

/*
 * Declare tracing information enums and their string mappings for display.
 */
2022-10-06 21:45:42 +01:00
|
|
|
#define rxrpc_abort_reasons \
|
|
|
|
/* AFS errors */ \
|
|
|
|
EM(afs_abort_general_error, "afs-error") \
|
|
|
|
EM(afs_abort_interrupted, "afs-intr") \
|
|
|
|
EM(afs_abort_oom, "afs-oom") \
|
|
|
|
EM(afs_abort_op_not_supported, "afs-op-notsupp") \
|
|
|
|
EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \
|
|
|
|
EM(afs_abort_send_data_error, "afs-send-data") \
|
|
|
|
EM(afs_abort_unmarshal_error, "afs-unmarshal") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(afs_abort_unsupported_sec_class, "afs-unsup-sec-class") \
|
2022-10-06 21:45:42 +01:00
|
|
|
/* rxperf errors */ \
|
|
|
|
EM(rxperf_abort_general_error, "rxperf-error") \
|
|
|
|
EM(rxperf_abort_oom, "rxperf-oom") \
|
|
|
|
EM(rxperf_abort_op_not_supported, "rxperf-op-notsupp") \
|
|
|
|
EM(rxperf_abort_unmarshal_error, "rxperf-unmarshal") \
|
|
|
|
/* RxKAD security errors */ \
|
|
|
|
EM(rxkad_abort_1_short_check, "rxkad1-short-check") \
|
|
|
|
EM(rxkad_abort_1_short_data, "rxkad1-short-data") \
|
|
|
|
EM(rxkad_abort_1_short_encdata, "rxkad1-short-encdata") \
|
|
|
|
EM(rxkad_abort_1_short_header, "rxkad1-short-hdr") \
|
|
|
|
EM(rxkad_abort_2_short_check, "rxkad2-short-check") \
|
|
|
|
EM(rxkad_abort_2_short_data, "rxkad2-short-data") \
|
|
|
|
EM(rxkad_abort_2_short_header, "rxkad2-short-hdr") \
|
|
|
|
EM(rxkad_abort_2_short_len, "rxkad2-short-len") \
|
|
|
|
EM(rxkad_abort_bad_checksum, "rxkad2-bad-cksum") \
|
|
|
|
EM(rxkad_abort_chall_key_expired, "rxkad-chall-key-exp") \
|
|
|
|
EM(rxkad_abort_chall_level, "rxkad-chall-level") \
|
|
|
|
EM(rxkad_abort_chall_no_key, "rxkad-chall-nokey") \
|
|
|
|
EM(rxkad_abort_chall_short, "rxkad-chall-short") \
|
|
|
|
EM(rxkad_abort_chall_version, "rxkad-chall-version") \
|
|
|
|
EM(rxkad_abort_resp_bad_callid, "rxkad-resp-bad-callid") \
|
|
|
|
EM(rxkad_abort_resp_bad_checksum, "rxkad-resp-bad-cksum") \
|
|
|
|
EM(rxkad_abort_resp_bad_param, "rxkad-resp-bad-param") \
|
|
|
|
EM(rxkad_abort_resp_call_ctr, "rxkad-resp-call-ctr") \
|
|
|
|
EM(rxkad_abort_resp_call_state, "rxkad-resp-call-state") \
|
|
|
|
EM(rxkad_abort_resp_key_expired, "rxkad-resp-key-exp") \
|
|
|
|
EM(rxkad_abort_resp_key_rejected, "rxkad-resp-key-rej") \
|
|
|
|
EM(rxkad_abort_resp_level, "rxkad-resp-level") \
|
|
|
|
EM(rxkad_abort_resp_nokey, "rxkad-resp-nokey") \
|
|
|
|
EM(rxkad_abort_resp_ooseq, "rxkad-resp-ooseq") \
|
|
|
|
EM(rxkad_abort_resp_short, "rxkad-resp-short") \
|
|
|
|
EM(rxkad_abort_resp_short_tkt, "rxkad-resp-short-tkt") \
|
|
|
|
EM(rxkad_abort_resp_tkt_aname, "rxkad-resp-tk-aname") \
|
|
|
|
EM(rxkad_abort_resp_tkt_expired, "rxkad-resp-tk-exp") \
|
|
|
|
EM(rxkad_abort_resp_tkt_future, "rxkad-resp-tk-future") \
|
|
|
|
EM(rxkad_abort_resp_tkt_inst, "rxkad-resp-tk-inst") \
|
|
|
|
EM(rxkad_abort_resp_tkt_len, "rxkad-resp-tk-len") \
|
|
|
|
EM(rxkad_abort_resp_tkt_realm, "rxkad-resp-tk-realm") \
|
|
|
|
EM(rxkad_abort_resp_tkt_short, "rxkad-resp-tk-short") \
|
|
|
|
EM(rxkad_abort_resp_tkt_sinst, "rxkad-resp-tk-sinst") \
|
|
|
|
EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \
|
|
|
|
EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \
|
|
|
|
EM(rxkad_abort_resp_version, "rxkad-resp-version") \
|
2025-04-11 10:52:53 +01:00
|
|
|
/* RxGK security errors */ \
|
|
|
|
EM(rxgk_abort_1_verify_mic_eproto, "rxgk1-vfy-mic-eproto") \
|
|
|
|
EM(rxgk_abort_2_decrypt_eproto, "rxgk2-dec-eproto") \
|
|
|
|
EM(rxgk_abort_2_short_data, "rxgk2-short-data") \
|
|
|
|
EM(rxgk_abort_2_short_encdata, "rxgk2-short-encdata") \
|
|
|
|
EM(rxgk_abort_2_short_header, "rxgk2-short-hdr") \
|
|
|
|
EM(rxgk_abort_bad_key_number, "rxgk-bad-key-num") \
|
|
|
|
EM(rxgk_abort_chall_key_expired, "rxgk-chall-key-exp") \
|
|
|
|
EM(rxgk_abort_chall_no_key, "rxgk-chall-nokey") \
|
|
|
|
EM(rxgk_abort_chall_short, "rxgk-chall-short") \
|
|
|
|
EM(rxgk_abort_resp_auth_dec, "rxgk-resp-auth-dec") \
|
|
|
|
EM(rxgk_abort_resp_bad_callid, "rxgk-resp-bad-callid") \
|
|
|
|
EM(rxgk_abort_resp_bad_nonce, "rxgk-resp-bad-nonce") \
|
|
|
|
EM(rxgk_abort_resp_bad_param, "rxgk-resp-bad-param") \
|
|
|
|
EM(rxgk_abort_resp_call_ctr, "rxgk-resp-call-ctr") \
|
|
|
|
EM(rxgk_abort_resp_call_state, "rxgk-resp-call-state") \
|
|
|
|
EM(rxgk_abort_resp_internal_error, "rxgk-resp-int-error") \
|
|
|
|
EM(rxgk_abort_resp_nopkg, "rxgk-resp-nopkg") \
|
|
|
|
EM(rxgk_abort_resp_short_applen, "rxgk-resp-short-applen") \
|
|
|
|
EM(rxgk_abort_resp_short_auth, "rxgk-resp-short-auth") \
|
|
|
|
EM(rxgk_abort_resp_short_call_list, "rxgk-resp-short-callls") \
|
|
|
|
EM(rxgk_abort_resp_short_packet, "rxgk-resp-short-packet") \
|
|
|
|
EM(rxgk_abort_resp_short_yfs_klen, "rxgk-resp-short-yfs-klen") \
|
|
|
|
EM(rxgk_abort_resp_short_yfs_key, "rxgk-resp-short-yfs-key") \
|
|
|
|
EM(rxgk_abort_resp_short_yfs_tkt, "rxgk-resp-short-yfs-tkt") \
|
|
|
|
EM(rxgk_abort_resp_tok_dec, "rxgk-resp-tok-dec") \
|
|
|
|
EM(rxgk_abort_resp_tok_internal_error, "rxgk-resp-tok-int-err") \
|
|
|
|
EM(rxgk_abort_resp_tok_keyerr, "rxgk-resp-tok-keyerr") \
|
|
|
|
EM(rxgk_abort_resp_tok_nokey, "rxgk-resp-tok-nokey") \
|
|
|
|
EM(rxgk_abort_resp_tok_nopkg, "rxgk-resp-tok-nopkg") \
|
|
|
|
EM(rxgk_abort_resp_tok_short, "rxgk-resp-tok-short") \
|
|
|
|
EM(rxgk_abort_resp_xdr_align, "rxgk-resp-xdr-align") \
|
2022-10-06 21:45:42 +01:00
|
|
|
/* rxrpc errors */ \
|
|
|
|
EM(rxrpc_abort_call_improper_term, "call-improper-term") \
|
|
|
|
EM(rxrpc_abort_call_reset, "call-reset") \
|
|
|
|
EM(rxrpc_abort_call_sendmsg, "call-sendmsg") \
|
|
|
|
EM(rxrpc_abort_call_sock_release, "call-sock-rel") \
|
|
|
|
EM(rxrpc_abort_call_sock_release_tba, "call-sock-rel-tba") \
|
|
|
|
EM(rxrpc_abort_call_timeout, "call-timeout") \
|
|
|
|
EM(rxrpc_abort_no_service_key, "no-serv-key") \
|
|
|
|
EM(rxrpc_abort_nomem, "nomem") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_abort_response_sendmsg, "resp-sendmsg") \
|
2022-10-06 21:45:42 +01:00
|
|
|
EM(rxrpc_abort_service_not_offered, "serv-not-offered") \
|
|
|
|
EM(rxrpc_abort_shut_down, "shut-down") \
|
|
|
|
EM(rxrpc_abort_unsupported_security, "unsup-sec") \
|
|
|
|
EM(rxrpc_badmsg_bad_abort, "bad-abort") \
|
|
|
|
EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
|
|
|
|
EM(rxrpc_badmsg_short_ack, "short-ack") \
|
2024-01-26 16:17:03 +00:00
|
|
|
EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
|
2022-10-06 21:45:42 +01:00
|
|
|
EM(rxrpc_badmsg_short_hdr, "short-hdr") \
|
|
|
|
EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
|
|
|
|
EM(rxrpc_badmsg_zero_call, "zero-call") \
|
|
|
|
EM(rxrpc_badmsg_zero_seq, "zero-seq") \
|
|
|
|
EM(rxrpc_badmsg_zero_service, "zero-service") \
|
|
|
|
EM(rxrpc_eproto_ackr_outside_window, "ackr-out-win") \
|
|
|
|
EM(rxrpc_eproto_ackr_sack_overflow, "ackr-sack-over") \
|
|
|
|
EM(rxrpc_eproto_ackr_short_sack, "ackr-short-sack") \
|
|
|
|
EM(rxrpc_eproto_ackr_zero, "ackr-zero") \
|
|
|
|
EM(rxrpc_eproto_bad_upgrade, "bad-upgrade") \
|
|
|
|
EM(rxrpc_eproto_data_after_last, "data-after-last") \
|
|
|
|
EM(rxrpc_eproto_different_last, "diff-last") \
|
|
|
|
EM(rxrpc_eproto_early_reply, "early-reply") \
|
|
|
|
EM(rxrpc_eproto_improper_term, "improper-term") \
|
|
|
|
EM(rxrpc_eproto_no_client_call, "no-cl-call") \
|
|
|
|
EM(rxrpc_eproto_no_client_conn, "no-cl-conn") \
|
|
|
|
EM(rxrpc_eproto_no_service_call, "no-sv-call") \
|
|
|
|
EM(rxrpc_eproto_reupgrade, "re-upgrade") \
|
|
|
|
EM(rxrpc_eproto_rxnull_challenge, "rxnull-chall") \
|
|
|
|
EM(rxrpc_eproto_rxnull_response, "rxnull-resp") \
|
|
|
|
EM(rxrpc_eproto_tx_rot_last, "tx-rot-last") \
|
|
|
|
EM(rxrpc_eproto_unexpected_ack, "unex-ack") \
|
|
|
|
EM(rxrpc_eproto_unexpected_ackall, "unex-ackall") \
|
|
|
|
EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end") \
|
|
|
|
EM(rxrpc_eproto_unexpected_reply, "unex-reply") \
|
|
|
|
EM(rxrpc_eproto_wrong_security, "wrong-sec") \
|
|
|
|
EM(rxrpc_recvmsg_excess_data, "recvmsg-excess") \
|
|
|
|
EM(rxrpc_recvmsg_short_data, "recvmsg-short") \
|
|
|
|
E_(rxrpc_sendmsg_late_send, "sendmsg-late")
|
|
|
|
|
2022-10-10 15:51:39 +01:00
|
|
|
#define rxrpc_call_poke_traces \
|
2022-10-12 22:17:56 +01:00
|
|
|
EM(rxrpc_call_poke_abort, "Abort") \
|
2022-10-12 17:01:25 +01:00
|
|
|
EM(rxrpc_call_poke_complete, "Compl") \
|
2024-12-04 07:46:30 +00:00
|
|
|
EM(rxrpc_call_poke_conn_abort, "Conn-abort") \
|
2022-10-10 15:51:39 +01:00
|
|
|
EM(rxrpc_call_poke_error, "Error") \
|
|
|
|
EM(rxrpc_call_poke_idle, "Idle") \
|
2024-12-04 07:46:41 +00:00
|
|
|
EM(rxrpc_call_poke_rx_packet, "Rx-packet") \
|
2024-01-30 21:37:16 +00:00
|
|
|
EM(rxrpc_call_poke_set_timeout, "Set-timo") \
|
2022-10-10 15:51:39 +01:00
|
|
|
EM(rxrpc_call_poke_start, "Start") \
|
|
|
|
EM(rxrpc_call_poke_timer, "Timer") \
|
|
|
|
E_(rxrpc_call_poke_timer_now, "Timer-now")
|
|
|
|
|
2017-01-05 10:38:33 +00:00
|
|
|
#define rxrpc_skb_traces \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
|
|
|
|
EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
|
2024-12-04 07:46:41 +00:00
|
|
|
EM(rxrpc_skb_get_call_rx, "GET call-rx ") \
|
2022-10-21 08:54:03 +01:00
|
|
|
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
|
2022-10-06 15:43:51 +01:00
|
|
|
EM(rxrpc_skb_get_local_work, "GET locl-work") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_get_post_oob, "GET post-oob ") \
|
2022-10-06 15:43:51 +01:00
|
|
|
EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
|
|
|
|
EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
|
|
|
|
EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
|
|
|
|
EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
|
|
|
|
EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_new_response_rxgk, "NEW resp-rxgk") \
|
|
|
|
EM(rxrpc_skb_new_response_rxkad, "NEW resp-rxkd") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_new_unshared, "NEW unshared ") \
|
2024-12-04 07:46:41 +00:00
|
|
|
EM(rxrpc_skb_put_call_rx, "PUT call-rx ") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_put_challenge, "PUT challenge") \
|
2022-10-21 08:54:03 +01:00
|
|
|
EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
|
|
|
|
EM(rxrpc_skb_put_error_report, "PUT error-rep") \
|
|
|
|
EM(rxrpc_skb_put_input, "PUT input ") \
|
|
|
|
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_put_oob, "PUT oob ") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_put_purge, "PUT purge ") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_put_purge_oob, "PUT purge-oob") \
|
|
|
|
EM(rxrpc_skb_put_response, "PUT response ") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_put_rotate, "PUT rotate ") \
|
|
|
|
EM(rxrpc_skb_put_unknown, "PUT unknown ") \
|
|
|
|
EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_see_oob_challenge, "SEE oob-chall") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_skb_see_recvmsg_oob, "SEE recvm-oob") \
|
2022-10-21 15:31:21 +01:00
|
|
|
EM(rxrpc_skb_see_reject, "SEE reject ") \
|
|
|
|
EM(rxrpc_skb_see_rotate, "SEE rotate ") \
|
|
|
|
E_(rxrpc_skb_see_version, "SEE version ")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
2018-03-30 21:05:28 +01:00
|
|
|
#define rxrpc_local_traces \
|
2022-10-21 13:00:34 +01:00
|
|
|
EM(rxrpc_local_free, "FREE ") \
|
2022-10-20 21:58:36 +01:00
|
|
|
EM(rxrpc_local_get_call, "GET call ") \
|
2022-10-21 13:00:34 +01:00
|
|
|
EM(rxrpc_local_get_client_conn, "GET conn-cln") \
|
|
|
|
EM(rxrpc_local_get_for_use, "GET for-use ") \
|
|
|
|
EM(rxrpc_local_get_peer, "GET peer ") \
|
|
|
|
EM(rxrpc_local_get_prealloc_conn, "GET conn-pre") \
|
|
|
|
EM(rxrpc_local_new, "NEW ") \
|
|
|
|
EM(rxrpc_local_put_bind, "PUT bind ") \
|
2022-10-20 21:58:36 +01:00
|
|
|
EM(rxrpc_local_put_call, "PUT call ") \
|
2022-10-21 13:00:34 +01:00
|
|
|
EM(rxrpc_local_put_for_use, "PUT for-use ") \
|
|
|
|
EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
|
|
|
|
EM(rxrpc_local_put_peer, "PUT peer ") \
|
2022-12-19 14:22:12 +00:00
|
|
|
EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \
|
2022-10-21 13:00:34 +01:00
|
|
|
EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
|
|
|
|
EM(rxrpc_local_stop, "STOP ") \
|
|
|
|
EM(rxrpc_local_stopped, "STOPPED ") \
|
|
|
|
EM(rxrpc_local_unuse_bind, "UNU bind ") \
|
|
|
|
EM(rxrpc_local_unuse_conn_work, "UNU conn-wrk") \
|
|
|
|
EM(rxrpc_local_unuse_peer_keepalive, "UNU peer-kpa") \
|
|
|
|
EM(rxrpc_local_unuse_release_sock, "UNU rel-sock") \
|
|
|
|
EM(rxrpc_local_use_conn_work, "USE conn-wrk") \
|
|
|
|
EM(rxrpc_local_use_lookup, "USE lookup ") \
|
2020-01-23 13:13:41 +00:00
|
|
|
E_(rxrpc_local_use_peer_keepalive, "USE peer-kpa")
|
2018-03-30 21:05:28 +01:00
|
|
|
|
2018-03-30 21:05:38 +01:00
|
|
|
#define rxrpc_peer_traces \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_free, "FREE ") \
|
|
|
|
EM(rxrpc_peer_get_accept, "GET accept ") \
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 12:55:11 +01:00
|
|
|
EM(rxrpc_peer_get_application, "GET app ") \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_get_bundle, "GET bundle ") \
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 12:55:11 +01:00
|
|
|
EM(rxrpc_peer_get_call, "GET call ") \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
|
2020-01-23 13:13:41 +00:00
|
|
|
EM(rxrpc_peer_get_input, "GET input ") \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_get_input_error, "GET inpt-err") \
|
|
|
|
EM(rxrpc_peer_get_keepalive, "GET keepaliv") \
|
|
|
|
EM(rxrpc_peer_get_lookup_client, "GET look-cln") \
|
|
|
|
EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
|
|
|
|
EM(rxrpc_peer_new_client, "NEW client ") \
|
|
|
|
EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
|
rxrpc, afs: Allow afs to pin rxrpc_peer objects
Change rxrpc's API such that:
(1) A new function, rxrpc_kernel_lookup_peer(), is provided to look up an
rxrpc_peer record for a remote address and a corresponding function,
rxrpc_kernel_put_peer(), is provided to dispose of it again.
(2) When setting up a call, the rxrpc_peer object used during a call is
now passed in rather than being set up by rxrpc_connect_call(). For
afs, this meenat passing it to rxrpc_kernel_begin_call() rather than
the full address (the service ID then has to be passed in as a
separate parameter).
(3) A new function, rxrpc_kernel_remote_addr(), is added so that afs can
get a pointer to the transport address for display purposed, and
another, rxrpc_kernel_remote_srx(), to gain a pointer to the full
rxrpc address.
(4) The function to retrieve the RTT from a call, rxrpc_kernel_get_srtt(),
is then altered to take a peer. This now returns the RTT or -1 if
there are insufficient samples.
(5) Rename rxrpc_kernel_get_peer() to rxrpc_kernel_call_get_peer().
(6) Provide a new function, rxrpc_kernel_get_peer(), to get a ref on a
peer the caller already has.
This allows the afs filesystem to pin the rxrpc_peer records that it is
using, allowing faster lookups and pointer comparisons rather than
comparing sockaddr_rxrpc contents. It also makes it easier to get hold of
the RTT. The following changes are made to afs:
(1) The addr_list struct's addrs[] elements now hold a peer struct pointer
and a service ID rather than a sockaddr_rxrpc.
(2) When displaying the transport address, rxrpc_kernel_remote_addr() is
used.
(3) The port arg is removed from afs_alloc_addrlist() since it's always
overridden.
(4) afs_merge_fs_addr4() and afs_merge_fs_addr6() do peer lookup and may
now return an error that must be handled.
(5) afs_find_server() now takes a peer pointer to specify the address.
(6) afs_find_server(), afs_compare_fs_alists() and afs_merge_fs_addr[46]{}
now do peer pointer comparison rather than address comparison.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-10-19 12:55:11 +01:00
|
|
|
EM(rxrpc_peer_put_application, "PUT app ") \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_put_bundle, "PUT bundle ") \
|
|
|
|
EM(rxrpc_peer_put_call, "PUT call ") \
|
|
|
|
EM(rxrpc_peer_put_conn, "PUT conn ") \
|
2020-01-23 13:13:41 +00:00
|
|
|
EM(rxrpc_peer_put_input, "PUT input ") \
|
2022-10-21 13:39:34 +01:00
|
|
|
EM(rxrpc_peer_put_input_error, "PUT inpt-err") \
|
|
|
|
E_(rxrpc_peer_put_keepalive, "PUT keepaliv")
|
2018-03-30 21:05:38 +01:00
|
|
|
|
2022-11-02 21:54:46 +00:00
|
|
|
#define rxrpc_bundle_traces \
|
|
|
|
EM(rxrpc_bundle_free, "FREE ") \
|
|
|
|
EM(rxrpc_bundle_get_client_call, "GET clt-call") \
|
|
|
|
EM(rxrpc_bundle_get_client_conn, "GET clt-conn") \
|
|
|
|
EM(rxrpc_bundle_get_service_conn, "GET svc-conn") \
|
2022-10-21 09:30:23 +01:00
|
|
|
EM(rxrpc_bundle_put_call, "PUT call ") \
|
2022-11-02 21:54:46 +00:00
|
|
|
EM(rxrpc_bundle_put_conn, "PUT conn ") \
|
|
|
|
EM(rxrpc_bundle_put_discard, "PUT discard ") \
|
|
|
|
E_(rxrpc_bundle_new, "NEW ")
|
|
|
|
|
2017-01-05 10:38:33 +00:00
|
|
|
#define rxrpc_conn_traces \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_free, "FREE ") \
|
|
|
|
EM(rxrpc_conn_get_activate_call, "GET act-call") \
|
|
|
|
EM(rxrpc_conn_get_call_input, "GET inp-call") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_conn_get_challenge_input, "GET inp-chal") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
|
|
|
|
EM(rxrpc_conn_get_idle, "GET idle ") \
|
2022-10-20 09:56:36 +01:00
|
|
|
EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_conn_get_poke_response, "GET response") \
|
2025-02-03 11:03:04 +00:00
|
|
|
EM(rxrpc_conn_get_poke_secured, "GET secured ") \
|
2022-10-20 09:08:34 +01:00
|
|
|
EM(rxrpc_conn_get_poke_timer, "GET poke ") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
|
|
|
|
EM(rxrpc_conn_new_client, "NEW client ") \
|
|
|
|
EM(rxrpc_conn_new_service, "NEW service ") \
|
|
|
|
EM(rxrpc_conn_put_call, "PUT call ") \
|
|
|
|
EM(rxrpc_conn_put_call_input, "PUT inp-call") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_conn_put_challenge_input, "PUT inp-chal") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \
|
|
|
|
EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \
|
|
|
|
EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \
|
|
|
|
EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_conn_put_oob, "PUT oob ") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_put_poke, "PUT poke ") \
|
2022-11-25 12:43:50 +00:00
|
|
|
EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_put_unbundle, "PUT unbundle") \
|
|
|
|
EM(rxrpc_conn_put_unidle, "PUT unidle ") \
|
2022-10-20 09:08:34 +01:00
|
|
|
EM(rxrpc_conn_put_work, "PUT work ") \
|
2022-11-25 12:43:50 +00:00
|
|
|
EM(rxrpc_conn_queue_challenge, "QUE chall ") \
|
2022-10-20 09:56:36 +01:00
|
|
|
EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \
|
2022-11-25 12:43:50 +00:00
|
|
|
EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \
|
2022-10-21 14:06:16 +01:00
|
|
|
EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \
|
|
|
|
EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \
|
|
|
|
E_(rxrpc_conn_see_work, "SEE work ")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_client_traces \
|
|
|
|
EM(rxrpc_client_activate_chans, "Activa") \
|
|
|
|
EM(rxrpc_client_alloc, "Alloc ") \
|
|
|
|
EM(rxrpc_client_chan_activate, "ChActv") \
|
|
|
|
EM(rxrpc_client_chan_disconnect, "ChDisc") \
|
|
|
|
EM(rxrpc_client_chan_pass, "ChPass") \
|
|
|
|
EM(rxrpc_client_cleanup, "Clean ") \
|
|
|
|
EM(rxrpc_client_discard, "Discar") \
|
|
|
|
EM(rxrpc_client_exposed, "Expose") \
|
|
|
|
EM(rxrpc_client_replace, "Replac") \
|
2022-10-19 09:45:43 +01:00
|
|
|
EM(rxrpc_client_queue_new_call, "Q-Call") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_client_to_active, "->Actv") \
|
rxrpc: Rewrite the client connection manager
Rewrite the rxrpc client connection manager so that it can support multiple
connections for a given security key to a peer. The following changes are
made:
(1) For each open socket, the code currently maintains an rbtree with the
connections placed into it, keyed by communications parameters. This
is tricky to maintain as connections can be culled from the tree or
replaced within it. Connections can require replacement for a number
of reasons, e.g. their IDs span too great a range for the IDR data
type to represent efficiently, the call ID numbers on that conn would
overflow or the conn got aborted.
This is changed so that there's now a connection bundle object placed
in the tree, keyed on the same parameters. The bundle, however, does
not need to be replaced.
(2) An rxrpc_bundle object can now manage the available channels for a set
of parallel connections. The lock that manages this is moved there
from the rxrpc_connection struct (channel_lock).
(3) There'a a dummy bundle for all incoming connections to share so that
they have a channel_lock too. It might be better to give each
incoming connection its own bundle. This bundle is not needed to
manage which channels incoming calls are made on because that's the
solely at whim of the client.
(4) The restrictions on how many client connections are around are
removed. Instead, a previous patch limits the number of client calls
that can be allocated. Ordinarily, client connections are reaped
after 2 minutes on the idle queue, but when more than a certain number
of connections are in existence, the reaper starts reaping them after
2s of idleness instead to get the numbers back down.
It could also be made such that new call allocations are forced to
wait until the number of outstanding connections subsides.
Signed-off-by: David Howells <dhowells@redhat.com>
2020-07-01 11:15:32 +01:00
|
|
|
E_(rxrpc_client_to_idle, "->Idle")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_call_traces \
|
2022-11-02 10:24:29 +00:00
|
|
|
EM(rxrpc_call_get_io_thread, "GET iothread") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_get_input, "GET input ") \
|
|
|
|
EM(rxrpc_call_get_kernel_service, "GET krnl-srv") \
|
|
|
|
EM(rxrpc_call_get_notify_socket, "GET notify ") \
|
2022-10-10 15:51:39 +01:00
|
|
|
EM(rxrpc_call_get_poke, "GET poke ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_get_recvmsg, "GET recvmsg ") \
|
|
|
|
EM(rxrpc_call_get_release_sock, "GET rel-sock") \
|
|
|
|
EM(rxrpc_call_get_sendmsg, "GET sendmsg ") \
|
|
|
|
EM(rxrpc_call_get_userid, "GET user-id ") \
|
|
|
|
EM(rxrpc_call_new_client, "NEW client ") \
|
|
|
|
EM(rxrpc_call_new_prealloc_service, "NEW prealloc") \
|
|
|
|
EM(rxrpc_call_put_discard_prealloc, "PUT disc-pre") \
|
2022-10-20 21:58:36 +01:00
|
|
|
EM(rxrpc_call_put_discard_error, "PUT disc-err") \
|
2022-11-02 10:24:29 +00:00
|
|
|
EM(rxrpc_call_put_io_thread, "PUT iothread") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_put_input, "PUT input ") \
|
|
|
|
EM(rxrpc_call_put_kernel, "PUT kernel ") \
|
2022-10-10 15:51:39 +01:00
|
|
|
EM(rxrpc_call_put_poke, "PUT poke ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
|
rxrpc: Fix notification vs call-release vs recvmsg
When a call is released, rxrpc takes the spinlock and removes it from
->recvmsg_q in an effort to prevent racing recvmsg() invocations from
seeing the same call. Now, rxrpc_recvmsg() only takes the spinlock when
actually removing a call from the queue; it doesn't, however, take it in
the lead up to that when it checks to see if the queue is empty. It *does*
hold the socket lock, which prevents a recvmsg/recvmsg race - but this
doesn't prevent sendmsg from ending the call because sendmsg() drops the
socket lock and relies on the call->user_mutex.
Fix this by firstly removing the bit in rxrpc_release_call() that dequeues
the released call and, instead, rely on recvmsg() to simply discard
released calls (done in a preceding fix).
Secondly, rxrpc_notify_socket() is abandoned if the call is already marked
as released rather than trying to be clever by setting both pointers in
call->recvmsg_link to NULL to trick list_empty(). This isn't perfect and
can still race, resulting in a released call on the queue, but recvmsg()
will now clean that up.
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
cc: LePremierHomme <kwqcheii@proton.me>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250717074350.3767366-4-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-07-17 08:43:43 +01:00
|
|
|
EM(rxrpc_call_put_release_recvmsg_q, "PUT rls-rcmq") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
|
|
|
|
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
|
|
|
|
EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
|
|
|
|
EM(rxrpc_call_put_userid_exists, "PUT u-exists") \
|
2022-10-19 09:45:43 +01:00
|
|
|
EM(rxrpc_call_put_userid, "PUT user-id ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_see_accept, "SEE accept ") \
|
|
|
|
EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
|
rxrpc: Fix recv-recv race of completed call
If a call receives an event (such as incoming data), the call gets placed
on the socket's queue and a thread in recvmsg can be awakened to go and
process it. Once the thread has picked up the call off of the queue,
further events will cause it to be requeued, and once the socket lock is
dropped (recvmsg uses call->user_mutex to allow the socket to be used in
parallel), a second thread can come in and its recvmsg can pop the call off
the socket queue again.
In such a case, the first thread will be receiving stuff from the call and
the second thread will be blocked on call->user_mutex. The first thread
can, at this point, process both the event that it picked call for and the
event that the second thread picked the call for and may see the call
terminate - in which case the call will be "released", decoupling the call
from the user call ID assigned to it (RXRPC_USER_CALL_ID in the control
message).
The first thread will return okay, but then the second thread will wake up
holding the user_mutex and, if it sees that the call has been released by
the first thread, it will BUG thusly:
kernel BUG at net/rxrpc/recvmsg.c:474!
Fix this by just dequeuing the call and ignoring it if it is seen to be
already released. We can't tell userspace about it anyway as the user call
ID has become stale.
Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
Reported-by: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
cc: LePremierHomme <kwqcheii@proton.me>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250717074350.3767366-3-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-07-17 08:43:42 +01:00
|
|
|
EM(rxrpc_call_see_already_released, "SEE alrdy-rl") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
|
|
|
|
EM(rxrpc_call_see_connected, "SEE connect ") \
|
2024-12-04 07:46:30 +00:00
|
|
|
EM(rxrpc_call_see_conn_abort, "SEE conn-abt") \
|
rxrpc: Fix recv-recv race of completed call
If a call receives an event (such as incoming data), the call gets placed
on the socket's queue and a thread in recvmsg can be awakened to go and
process it. Once the thread has picked up the call off of the queue,
further events will cause it to be requeued, and once the socket lock is
dropped (recvmsg uses call->user_mutex to allow the socket to be used in
parallel), a second thread can come in and its recvmsg can pop the call off
the socket queue again.
In such a case, the first thread will be receiving stuff from the call and
the second thread will be blocked on call->user_mutex. The first thread
can, at this point, process both the event that it picked call for and the
event that the second thread picked the call for and may see the call
terminate - in which case the call will be "released", decoupling the call
from the user call ID assigned to it (RXRPC_USER_CALL_ID in the control
message).
The first thread will return okay, but then the second thread will wake up
holding the user_mutex and, if it sees that the call has been released by
the first thread, it will BUG thusly:
kernel BUG at net/rxrpc/recvmsg.c:474!
Fix this by just dequeuing the call and ignoring it if it is seen to be
already released. We can't tell userspace about it anyway as the user call
ID has become stale.
Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
Reported-by: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
cc: LePremierHomme <kwqcheii@proton.me>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250717074350.3767366-3-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-07-17 08:43:42 +01:00
|
|
|
EM(rxrpc_call_see_discard, "SEE discard ") \
|
2022-11-02 10:24:29 +00:00
|
|
|
EM(rxrpc_call_see_disconnected, "SEE disconn ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
|
|
|
|
EM(rxrpc_call_see_input, "SEE input ") \
|
rxrpc: Fix notification vs call-release vs recvmsg
When a call is released, rxrpc takes the spinlock and removes it from
->recvmsg_q in an effort to prevent racing recvmsg() invocations from
seeing the same call. Now, rxrpc_recvmsg() only takes the spinlock when
actually removing a call from the queue; it doesn't, however, take it in
the lead up to that when it checks to see if the queue is empty. It *does*
hold the socket lock, which prevents a recvmsg/recvmsg race - but this
doesn't prevent sendmsg from ending the call because sendmsg() drops the
socket lock and relies on the call->user_mutex.
Fix this by firstly removing the bit in rxrpc_release_call() that dequeues
the released call and, instead, rely on recvmsg() to simply discard
released calls (done in a preceding fix).
Secondly, rxrpc_notify_socket() is abandoned if the call is already marked
as released rather than trying to be clever by setting both pointers in
call->recvmsg_link to NULL to trick list_empty(). This isn't perfect and
can still race, resulting in a released call on the queue, but recvmsg()
will now clean that up.
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both")
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
cc: LePremierHomme <kwqcheii@proton.me>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250717074350.3767366-4-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-07-17 08:43:43 +01:00
|
|
|
EM(rxrpc_call_see_notify_released, "SEE nfy-rlsd") \
|
rxrpc: Fix recv-recv race of completed call
If a call receives an event (such as incoming data), the call gets placed
on the socket's queue and a thread in recvmsg can be awakened to go and
process it. Once the thread has picked up the call off of the queue,
further events will cause it to be requeued, and once the socket lock is
dropped (recvmsg uses call->user_mutex to allow the socket to be used in
parallel), a second thread can come in and its recvmsg can pop the call off
the socket queue again.
In such a case, the first thread will be receiving stuff from the call and
the second thread will be blocked on call->user_mutex. The first thread
can, at this point, process both the event that it picked call for and the
event that the second thread picked the call for and may see the call
terminate - in which case the call will be "released", decoupling the call
from the user call ID assigned to it (RXRPC_USER_CALL_ID in the control
message).
The first thread will return okay, but then the second thread will wake up
holding the user_mutex and, if it sees that the call has been released by
the first thread, it will BUG thusly:
kernel BUG at net/rxrpc/recvmsg.c:474!
Fix this by just dequeuing the call and ignoring it if it is seen to be
already released. We can't tell userspace about it anyway as the user call
ID has become stale.
Fixes: 248f219cb8bc ("rxrpc: Rewrite the data and ack handling code")
Reported-by: Junvyyang, Tencent Zhuque Lab <zhuque@tencent.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
cc: LePremierHomme <kwqcheii@proton.me>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20250717074350.3767366-3-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2025-07-17 08:43:42 +01:00
|
|
|
EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
EM(rxrpc_call_see_release, "SEE release ") \
|
|
|
|
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
|
2024-11-06 13:03:22 +00:00
|
|
|
EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
|
2022-10-21 14:39:26 +01:00
|
|
|
E_(rxrpc_call_see_zap, "SEE zap ")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
2022-03-31 23:55:08 +01:00
|
|
|
#define rxrpc_txqueue_traces \
|
|
|
|
EM(rxrpc_txqueue_await_reply, "AWR") \
|
|
|
|
EM(rxrpc_txqueue_end, "END") \
|
|
|
|
EM(rxrpc_txqueue_queue, "QUE") \
|
|
|
|
EM(rxrpc_txqueue_queue_last, "QLS") \
|
|
|
|
EM(rxrpc_txqueue_rotate, "ROT") \
|
|
|
|
EM(rxrpc_txqueue_rotate_last, "RLS") \
|
|
|
|
E_(rxrpc_txqueue_wait, "WAI")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
2024-12-04 07:47:03 +00:00
|
|
|
#define rxrpc_txdata_traces \
|
|
|
|
EM(rxrpc_txdata_inject_loss, " *INJ-LOSS*") \
|
|
|
|
EM(rxrpc_txdata_new_data, " ") \
|
2024-12-04 07:47:07 +00:00
|
|
|
EM(rxrpc_txdata_retransmit, " *RETRANS*") \
|
|
|
|
EM(rxrpc_txdata_tlp_new_data, " *TLP-NEW*") \
|
|
|
|
E_(rxrpc_txdata_tlp_retransmit, " *TLP-RETRANS*")
|
2024-12-04 07:47:03 +00:00
|
|
|
|
2017-01-05 10:38:33 +00:00
|
|
|
#define rxrpc_receive_traces \
|
|
|
|
EM(rxrpc_receive_end, "END") \
|
|
|
|
EM(rxrpc_receive_front, "FRN") \
|
|
|
|
EM(rxrpc_receive_incoming, "INC") \
|
|
|
|
EM(rxrpc_receive_queue, "QUE") \
|
|
|
|
EM(rxrpc_receive_queue_last, "QLS") \
|
2022-08-27 14:27:56 +01:00
|
|
|
EM(rxrpc_receive_queue_oos, "QUO") \
|
|
|
|
EM(rxrpc_receive_queue_oos_last, "QOL") \
|
|
|
|
EM(rxrpc_receive_oos, "OOS") \
|
|
|
|
EM(rxrpc_receive_oos_last, "OSL") \
|
|
|
|
EM(rxrpc_receive_rotate, "ROT") \
|
|
|
|
E_(rxrpc_receive_rotate_last, "RLS")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_recvmsg_traces \
|
|
|
|
EM(rxrpc_recvmsg_cont, "CONT") \
|
|
|
|
EM(rxrpc_recvmsg_data_return, "DATA") \
|
|
|
|
EM(rxrpc_recvmsg_dequeue, "DEQU") \
|
|
|
|
EM(rxrpc_recvmsg_enter, "ENTR") \
|
|
|
|
EM(rxrpc_recvmsg_full, "FULL") \
|
|
|
|
EM(rxrpc_recvmsg_hole, "HOLE") \
|
|
|
|
EM(rxrpc_recvmsg_next, "NEXT") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_recvmsg_oobq, "OOBQ") \
|
rxrpc: Fix deadlock between call creation and sendmsg/recvmsg
All the routines by which rxrpc is accessed from the outside are serialised
by means of the socket lock (sendmsg, recvmsg, bind,
rxrpc_kernel_begin_call(), ...) and this presents a problem:
(1) If a number of calls on the same socket are in the process of
connection to the same peer, a maximum of four concurrent live calls
are permitted before further calls need to wait for a slot.
(2) If a call is waiting for a slot, it is deep inside sendmsg() or
rxrpc_kernel_begin_call() and the entry function is holding the socket
lock.
(3) sendmsg() and recvmsg() or the in-kernel equivalents are prevented
from servicing the other calls as they need to take the socket lock to
do so.
(4) The socket is stuck until a call is aborted and makes its slot
available to the waiter.
Fix this by:
(1) Provide each call with a mutex ('user_mutex') that arbitrates access
by the users of rxrpc separately for each specific call.
(2) Make rxrpc_sendmsg() and rxrpc_recvmsg() unlock the socket as soon as
they've got a call and taken its mutex.
Note that I'm returning EWOULDBLOCK from recvmsg() if MSG_DONTWAIT is
set but someone else has the lock. Should I instead only return
EWOULDBLOCK if there's nothing currently to be done on a socket, and
sleep in this particular instance because there is something to be
done, but we appear to be blocked by the interrupt handler doing its
ping?
(3) Make rxrpc_new_client_call() unlock the socket after allocating a new
call, locking its user mutex and adding it to the socket's call tree.
The call is returned locked so that sendmsg() can add data to it
immediately.
From the moment the call is in the socket tree, it is subject to
access by sendmsg() and recvmsg() - even if it isn't connected yet.
(4) Lock new service calls in the UDP data_ready handler (in
rxrpc_new_incoming_call()) because they may already be in the socket's
tree and the data_ready handler makes them live immediately if a user
ID has already been preassigned.
Note that the new call is locked before any notifications are sent
that it is live, so doing mutex_trylock() *ought* to always succeed.
Userspace is prevented from doing sendmsg() on calls that are in a
too-early state in rxrpc_do_sendmsg().
(5) Make rxrpc_new_incoming_call() return the call with the user mutex
held so that a ping can be scheduled immediately under it.
Note that it might be worth moving the ping call into
rxrpc_new_incoming_call() and then we can drop the mutex there.
(6) Make rxrpc_accept_call() take the lock on the call it is accepting and
release the socket after adding the call to the socket's tree. This
is slightly tricky as we've dequeued the call by that point and have
to requeue it.
Note that requeuing emits a trace event.
(7) Make rxrpc_kernel_send_data() and rxrpc_kernel_recv_data() take the
new mutex immediately and don't bother with the socket mutex at all.
This patch has the nice bonus that calls on the same socket are now to some
extent parallelisable.
Note that we might want to move rxrpc_service_prealloc() calls out from the
socket lock and give it its own lock, so that we don't hang progress in
other calls because we're waiting for the allocator.
We probably also want to avoid calling rxrpc_notify_socket() from within
the socket lock (rxrpc_accept_call()).
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Marc Dionne <marc.c.dionne@auristor.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-02-27 15:43:06 +00:00
|
|
|
EM(rxrpc_recvmsg_requeue, "REQU") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_recvmsg_return, "RETN") \
|
|
|
|
EM(rxrpc_recvmsg_terminal, "TERM") \
|
|
|
|
EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \
|
2023-02-15 21:48:05 +00:00
|
|
|
EM(rxrpc_recvmsg_unqueue, "UNQU") \
|
2017-01-05 10:38:33 +00:00
|
|
|
E_(rxrpc_recvmsg_wait, "WAIT")
|
|
|
|
|
|
|
|
#define rxrpc_rtt_tx_traces \
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
EM(rxrpc_rtt_tx_cancel, "CNCE") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_rtt_tx_data, "DATA") \
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
EM(rxrpc_rtt_tx_no_slot, "FULL") \
|
2017-01-05 10:38:33 +00:00
|
|
|
E_(rxrpc_rtt_tx_ping, "PING")
|
|
|
|
|
|
|
|
#define rxrpc_rtt_rx_traces \
|
2024-12-04 07:46:52 +00:00
|
|
|
EM(rxrpc_rtt_rx_data_ack, "DACK") \
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
|
|
|
|
EM(rxrpc_rtt_rx_lost, "LOST") \
|
2024-12-04 07:46:52 +00:00
|
|
|
E_(rxrpc_rtt_rx_ping_response, "PONG")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_timer_traces \
|
2024-01-30 21:37:16 +00:00
|
|
|
EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
|
|
|
|
EM(rxrpc_timer_trace_expect_rx, "ExpectRx ") \
|
|
|
|
EM(rxrpc_timer_trace_hard, "HardLimit") \
|
|
|
|
EM(rxrpc_timer_trace_idle, "IdleLimit") \
|
|
|
|
EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
|
|
|
|
EM(rxrpc_timer_trace_ping, "DelayPing") \
|
2024-12-04 07:47:07 +00:00
|
|
|
EM(rxrpc_timer_trace_rack_off, "RACK-OFF ") \
|
|
|
|
EM(rxrpc_timer_trace_rack_zwp, "RACK-ZWP ") \
|
|
|
|
EM(rxrpc_timer_trace_rack_reo, "RACK-Reo ") \
|
|
|
|
EM(rxrpc_timer_trace_rack_tlp_pto, "TLP-PTO ") \
|
|
|
|
E_(rxrpc_timer_trace_rack_rto, "RTO ")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_propose_ack_traces \
|
|
|
|
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
|
2024-01-30 20:30:03 +00:00
|
|
|
EM(rxrpc_propose_ack_delayed_ack, "DlydAck") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_propose_ack_input_data, "DataIn ") \
|
2020-01-30 21:48:14 +00:00
|
|
|
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
|
2017-11-24 10:18:42 +00:00
|
|
|
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
|
|
|
|
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
|
2024-01-30 20:30:03 +00:00
|
|
|
EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
|
2024-12-04 07:46:37 +00:00
|
|
|
EM(rxrpc_propose_ack_ping_for_mtu_probe, "MTUProb") \
|
2022-10-17 10:55:41 +01:00
|
|
|
EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
|
2022-10-17 10:55:41 +01:00
|
|
|
EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
|
|
|
|
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
|
|
|
|
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
|
2024-12-04 07:47:04 +00:00
|
|
|
EM(rxrpc_propose_ack_retransmit, "Retrans") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
|
|
|
|
EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
|
2020-01-23 13:13:41 +00:00
|
|
|
EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \
|
2017-01-05 10:38:33 +00:00
|
|
|
E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
|
|
|
|
|
2024-12-04 07:46:50 +00:00
|
|
|
#define rxrpc_ca_states \
|
|
|
|
EM(RXRPC_CA_CONGEST_AVOIDANCE, "CongAvoid") \
|
|
|
|
EM(RXRPC_CA_FAST_RETRANSMIT, "FastReTx ") \
|
|
|
|
EM(RXRPC_CA_PACKET_LOSS, "PktLoss ") \
|
|
|
|
E_(RXRPC_CA_SLOW_START, "SlowStart")
|
2017-01-05 10:38:33 +00:00
|
|
|
|
|
|
|
#define rxrpc_congest_changes \
|
|
|
|
EM(rxrpc_cong_begin_retransmission, " Retrans") \
|
|
|
|
EM(rxrpc_cong_cleared_nacks, " Cleared") \
|
|
|
|
EM(rxrpc_cong_new_low_nack, " NewLowN") \
|
2020-06-17 22:50:33 +01:00
|
|
|
EM(rxrpc_cong_no_change, " -") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_cong_progress, " Progres") \
|
2022-10-03 18:49:11 +01:00
|
|
|
EM(rxrpc_cong_idle_reset, " IdleRes") \
|
2017-01-05 10:38:33 +00:00
|
|
|
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
|
|
|
|
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
|
|
|
|
E_(rxrpc_cong_saw_nack, " SawNack")
|
|
|
|
|
|
|
|
#define rxrpc_pkts \
|
|
|
|
EM(0, "?00") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_DATA, "DATA") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_ACK, "ACK") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_BUSY, "BUSY") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_ABORT, "ABORT") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_ACKALL, "ACKALL") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_CHALLENGE, "CHALL") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_RESPONSE, "RESP") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_DEBUG, "DEBUG") \
|
|
|
|
EM(9, "?09") \
|
|
|
|
EM(10, "?10") \
|
|
|
|
EM(11, "?11") \
|
|
|
|
EM(12, "?12") \
|
|
|
|
EM(RXRPC_PACKET_TYPE_VERSION, "VERSION") \
|
|
|
|
EM(14, "?14") \
|
|
|
|
E_(15, "?15")
|
|
|
|
|
|
|
|
#define rxrpc_ack_names \
|
|
|
|
EM(0, "-0-") \
|
|
|
|
EM(RXRPC_ACK_REQUESTED, "REQ") \
|
|
|
|
EM(RXRPC_ACK_DUPLICATE, "DUP") \
|
|
|
|
EM(RXRPC_ACK_OUT_OF_SEQUENCE, "OOS") \
|
|
|
|
EM(RXRPC_ACK_EXCEEDS_WINDOW, "WIN") \
|
|
|
|
EM(RXRPC_ACK_NOSPACE, "MEM") \
|
|
|
|
EM(RXRPC_ACK_PING, "PNG") \
|
|
|
|
EM(RXRPC_ACK_PING_RESPONSE, "PNR") \
|
|
|
|
EM(RXRPC_ACK_DELAY, "DLY") \
|
|
|
|
EM(RXRPC_ACK_IDLE, "IDL") \
|
|
|
|
E_(RXRPC_ACK__INVALID, "-?-")
|
|
|
|
|
2022-10-16 08:01:32 +01:00
|
|
|
#define rxrpc_sack_traces \
|
|
|
|
EM(rxrpc_sack_advance, "ADV") \
|
|
|
|
EM(rxrpc_sack_fill, "FIL") \
|
|
|
|
EM(rxrpc_sack_nack, "NAK") \
|
|
|
|
EM(rxrpc_sack_none, "---") \
|
|
|
|
E_(rxrpc_sack_oos, "OOS")
|
|
|
|
|
2018-03-27 23:08:20 +01:00
|
|
|
#define rxrpc_completions \
|
|
|
|
EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
|
|
|
|
EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
|
|
|
|
EM(RXRPC_CALL_LOCALLY_ABORTED, "LocalAbort") \
|
|
|
|
EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
|
|
|
|
E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
|
|
|
|
|
2018-07-23 17:18:37 +01:00
|
|
|
#define rxrpc_tx_points \
|
|
|
|
EM(rxrpc_tx_point_call_abort, "CallAbort") \
|
|
|
|
EM(rxrpc_tx_point_call_ack, "CallAck") \
|
|
|
|
EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \
|
|
|
|
EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \
|
|
|
|
EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
|
|
|
|
EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
|
|
|
|
EM(rxrpc_tx_point_reject, "Reject") \
|
2025-04-11 10:52:53 +01:00
|
|
|
EM(rxrpc_tx_point_rxgk_challenge, "RxGKChall") \
|
2018-07-23 17:18:37 +01:00
|
|
|
EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
|
2025-04-11 10:52:49 +01:00
|
|
|
EM(rxrpc_tx_point_response, "Response") \
|
2018-07-23 17:18:37 +01:00
|
|
|
EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
|
|
|
|
E_(rxrpc_tx_point_version_reply, "VerReply")
|
2018-05-10 23:26:01 +01:00
|
|
|
|
2022-04-05 21:48:48 +01:00
|
|
|
#define rxrpc_req_ack_traces \
|
|
|
|
EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \
|
2024-12-04 07:46:35 +00:00
|
|
|
EM(rxrpc_reqack_app_stall, "APP-STALL ") \
|
2022-04-05 21:48:48 +01:00
|
|
|
EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \
|
|
|
|
EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \
|
|
|
|
EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
|
|
|
|
EM(rxrpc_reqack_retrans, "RETRANS ") \
|
|
|
|
EM(rxrpc_reqack_slow_start, "SLOW-START") \
|
|
|
|
E_(rxrpc_reqack_small_txwin, "SMALL-TXWN")
|
2022-08-18 11:52:36 +01:00
|
|
|
/* ---- Must update size of stat_why_req_ack[] if more are added! */
|
2022-04-05 21:48:48 +01:00
|
|
|
|
2022-04-05 21:16:32 +01:00
|
|
|
#define rxrpc_txbuf_traces \
|
|
|
|
EM(rxrpc_txbuf_alloc_data, "ALLOC DATA ") \
|
2025-04-11 10:52:53 +01:00
|
|
|
EM(rxrpc_txbuf_alloc_response, "ALLOC RESP ") \
|
2022-04-05 21:16:32 +01:00
|
|
|
EM(rxrpc_txbuf_free, "FREE ") \
|
2022-03-31 23:55:08 +01:00
|
|
|
EM(rxrpc_txbuf_get_buffer, "GET BUFFER ") \
|
2022-04-05 21:16:32 +01:00
|
|
|
EM(rxrpc_txbuf_get_trans, "GET TRANS ") \
|
|
|
|
EM(rxrpc_txbuf_get_retrans, "GET RETRANS") \
|
|
|
|
EM(rxrpc_txbuf_put_cleaned, "PUT CLEANED") \
|
2020-01-30 21:48:13 +00:00
|
|
|
EM(rxrpc_txbuf_put_nomem, "PUT NOMEM ") \
|
2022-04-05 21:16:32 +01:00
|
|
|
EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \
|
2025-04-11 10:52:53 +01:00
|
|
|
EM(rxrpc_txbuf_put_response_tx, "PUT RESP TX") \
|
2022-04-05 21:16:32 +01:00
|
|
|
EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \
|
2022-03-31 23:55:08 +01:00
|
|
|
EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \
|
2024-12-04 07:47:07 +00:00
|
|
|
EM(rxrpc_txbuf_see_lost, "SEE LOST ") \
|
2022-11-25 09:00:55 +00:00
|
|
|
EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \
|
2024-12-04 07:47:07 +00:00
|
|
|
E_(rxrpc_txbuf_see_send_more, "SEE SEND+ ")
|
2022-04-05 21:16:32 +01:00
|
|
|
|
2024-12-04 07:46:46 +00:00
|
|
|
#define rxrpc_tq_traces \
|
|
|
|
EM(rxrpc_tq_alloc, "ALLOC") \
|
|
|
|
EM(rxrpc_tq_cleaned, "CLEAN") \
|
|
|
|
EM(rxrpc_tq_decant, "DCNT ") \
|
|
|
|
EM(rxrpc_tq_decant_advance, "DCNT>") \
|
|
|
|
EM(rxrpc_tq_queue, "QUEUE") \
|
|
|
|
EM(rxrpc_tq_queue_dup, "QUE!!") \
|
|
|
|
EM(rxrpc_tq_rotate, "ROT ") \
|
|
|
|
EM(rxrpc_tq_rotate_and_free, "ROT-F") \
|
|
|
|
EM(rxrpc_tq_rotate_and_keep, "ROT-K") \
|
|
|
|
EM(rxrpc_tq_transmit, "XMIT ") \
|
|
|
|
E_(rxrpc_tq_transmit_advance, "XMIT>")
|
|
|
|
|
2024-12-04 07:46:37 +00:00
|
|
|
#define rxrpc_pmtud_reduce_traces \
|
|
|
|
EM(rxrpc_pmtud_reduce_ack, "Ack ") \
|
|
|
|
EM(rxrpc_pmtud_reduce_icmp, "Icmp ") \
|
|
|
|
E_(rxrpc_pmtud_reduce_route, "Route")
|
|
|
|
|
2024-12-04 07:46:51 +00:00
|
|
|
#define rxrpc_rotate_traces \
|
|
|
|
EM(rxrpc_rotate_trace_hack, "hard-ack") \
|
|
|
|
EM(rxrpc_rotate_trace_sack, "soft-ack") \
|
|
|
|
E_(rxrpc_rotate_trace_snak, "soft-nack")
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
#define rxrpc_rack_timer_modes \
|
|
|
|
EM(RXRPC_CALL_RACKTIMER_OFF, "---") \
|
|
|
|
EM(RXRPC_CALL_RACKTIMER_RACK_REORDER, "REO") \
|
|
|
|
EM(RXRPC_CALL_RACKTIMER_TLP_PTO, "TLP") \
|
|
|
|
E_(RXRPC_CALL_RACKTIMER_RTO, "RTO")
|
|
|
|
|
|
|
|
#define rxrpc_tlp_probe_traces \
|
|
|
|
EM(rxrpc_tlp_probe_trace_busy, "busy") \
|
|
|
|
EM(rxrpc_tlp_probe_trace_transmit_new, "transmit-new") \
|
|
|
|
E_(rxrpc_tlp_probe_trace_retransmit, "retransmit")
|
|
|
|
|
|
|
|
#define rxrpc_tlp_ack_traces \
|
|
|
|
EM(rxrpc_tlp_ack_trace_acked, "acked") \
|
|
|
|
EM(rxrpc_tlp_ack_trace_dup_acked, "dup-acked") \
|
|
|
|
EM(rxrpc_tlp_ack_trace_hard_beyond, "hard-beyond") \
|
|
|
|
EM(rxrpc_tlp_ack_trace_incomplete, "incomplete") \
|
|
|
|
E_(rxrpc_tlp_ack_trace_new_data, "new-data")
|
|
|
|
|
2022-05-21 08:45:35 +01:00
|
|
|
/*
|
|
|
|
* Generate enums for tracing information.
|
|
|
|
*/
|
|
|
|
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
|
|
|
|
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
|
|
|
|
|
|
|
|
/*
 * Declare the tracing enums from the EM()/E_() expansion lists defined above.
 * EM() emits an enumerator followed by a comma, E_() emits the final
 * enumerator without one; __mode(byte) packs each enum into a single byte.
 */
#undef EM
#undef E_

#define EM(a, b) a,
#define E_(a, b) a

enum rxrpc_abort_reason { rxrpc_abort_reasons } __mode(byte);
enum rxrpc_bundle_trace { rxrpc_bundle_traces } __mode(byte);
enum rxrpc_call_poke_trace { rxrpc_call_poke_traces } __mode(byte);
enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte);
enum rxrpc_client_trace { rxrpc_client_traces } __mode(byte);
enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
enum rxrpc_pmtud_reduce_trace { rxrpc_pmtud_reduce_traces } __mode(byte);
enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
enum rxrpc_rotate_trace { rxrpc_rotate_traces } __mode(byte);
enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
enum rxrpc_tlp_ack_trace { rxrpc_tlp_ack_traces } __mode(byte);
enum rxrpc_tlp_probe_trace { rxrpc_tlp_probe_traces } __mode(byte);
enum rxrpc_tq_trace { rxrpc_tq_traces } __mode(byte);
enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
enum rxrpc_txbuf_trace { rxrpc_txbuf_traces } __mode(byte);
enum rxrpc_txdata_trace { rxrpc_txdata_traces } __mode(byte);
enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);

#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
|
|
|
|
|
2017-01-05 10:38:33 +00:00
|
|
|
/*
 * Export enum symbols via userspace.
 */
#undef EM
#undef E_

#ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS

/* Register each enumerator with the tracing core so userspace tools can
 * decode __print_symbolic() values by name. */
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);

rxrpc_abort_reasons;
rxrpc_bundle_traces;
rxrpc_ca_states;
rxrpc_call_poke_traces;
rxrpc_call_traces;
rxrpc_client_traces;
rxrpc_congest_changes;
rxrpc_conn_traces;
rxrpc_local_traces;
rxrpc_pmtud_reduce_traces;
rxrpc_propose_ack_traces;
rxrpc_rack_timer_modes;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
rxrpc_req_ack_traces;
rxrpc_rotate_traces;
rxrpc_rtt_rx_traces;
rxrpc_rtt_tx_traces;
rxrpc_sack_traces;
rxrpc_skb_traces;
rxrpc_timer_traces;
rxrpc_tlp_ack_traces;
rxrpc_tlp_probe_traces;
rxrpc_tq_traces;
rxrpc_tx_points;
rxrpc_txbuf_traces;
rxrpc_txdata_traces;
rxrpc_txqueue_traces;

/*
 * Now redefine the EM() and E_() macros to map the enums to the strings that
 * will be printed in the output.
 */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
|
|
|
|
|
2018-03-30 21:05:28 +01:00
|
|
|
/* Trace a local-endpoint lifecycle event: logs the endpoint debug ID, the
 * operation (decoded via rxrpc_local_traces) and the ref/usage counts the
 * caller passes in. */
TRACE_EVENT(rxrpc_local,
	    TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
		     int ref, int usage),

	    TP_ARGS(local_debug_id, op, ref, usage),

	    TP_STRUCT__entry(
		    __field(unsigned int,	local)
		    __field(int,		op)
		    __field(int,		ref)
		    __field(int,		usage)
			     ),

	    TP_fast_assign(
		    __entry->local = local_debug_id;
		    __entry->op = op;
		    __entry->ref = ref;
		    __entry->usage = usage;
			   ),

	    TP_printk("L=%08x %s r=%d u=%d",
		      __entry->local,
		      __print_symbolic(__entry->op, rxrpc_local_traces),
		      __entry->ref,
		      __entry->usage)
	    );
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
/* Trace the I/O thread receiving packets for a local endpoint: logs the
 * endpoint debug ID and the nr_rx count supplied by the caller. */
TRACE_EVENT(rxrpc_iothread_rx,
	    TP_PROTO(struct rxrpc_local *local, unsigned int nr_rx),

	    TP_ARGS(local, nr_rx),

	    TP_STRUCT__entry(
		    __field(unsigned int,	local)
		    __field(unsigned int,	nr_rx)
			     ),

	    TP_fast_assign(
		    __entry->local = local->debug_id;
		    __entry->nr_rx = nr_rx;
			   ),

	    TP_printk("L=%08x nrx=%u", __entry->local, __entry->nr_rx)
	    );
|
|
|
|
|
2018-03-30 21:05:38 +01:00
|
|
|
/* Trace a peer-record refcount event: logs the peer debug ID, the refcount
 * passed in and the reason (decoded via rxrpc_peer_traces). */
TRACE_EVENT(rxrpc_peer,
	    TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),

	    TP_ARGS(peer_debug_id, ref, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,		peer)
		    __field(int,			ref)
		    __field(enum rxrpc_peer_trace,	why)
			     ),

	    TP_fast_assign(
		    __entry->peer = peer_debug_id;
		    __entry->ref = ref;
		    __entry->why = why;
			   ),

	    TP_printk("P=%08x %s r=%d",
		      __entry->peer,
		      __print_symbolic(__entry->why, rxrpc_peer_traces),
		      __entry->ref)
	    );
|
|
|
|
|
2022-11-02 21:54:46 +00:00
|
|
|
/* Trace a connection-bundle refcount event: logs the bundle debug ID, the
 * refcount passed in and the reason (decoded via rxrpc_bundle_traces). */
TRACE_EVENT(rxrpc_bundle,
	    TP_PROTO(unsigned int bundle_debug_id, int ref, enum rxrpc_bundle_trace why),

	    TP_ARGS(bundle_debug_id, ref, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,	bundle)
		    __field(int,		ref)
		    __field(int,		why)
			     ),

	    TP_fast_assign(
		    __entry->bundle = bundle_debug_id;
		    __entry->ref = ref;
		    __entry->why = why;
			   ),

	    TP_printk("CB=%08x %s r=%d",
		      __entry->bundle,
		      __print_symbolic(__entry->why, rxrpc_bundle_traces),
		      __entry->ref)
	    );
|
|
|
|
|
2016-09-17 10:49:14 +01:00
|
|
|
/* Trace a connection refcount event: logs the connection debug ID, the
 * refcount passed in and the reason (decoded via rxrpc_conn_traces). */
TRACE_EVENT(rxrpc_conn,
	    TP_PROTO(unsigned int conn_debug_id, int ref, enum rxrpc_conn_trace why),

	    TP_ARGS(conn_debug_id, ref, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(int,		ref)
		    __field(int,		why)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn_debug_id;
		    __entry->ref = ref;
		    __entry->why = why;
			   ),

	    TP_printk("C=%08x %s r=%d",
		      __entry->conn,
		      __print_symbolic(__entry->why, rxrpc_conn_traces),
		      __entry->ref)
	    );
|
|
|
|
|
|
|
|
/* Trace client-connection management: logs the connection debug ID, channel
 * number, operation (decoded via rxrpc_client_traces), protocol CID and
 * current refcount.  'conn' may be NULL, in which case the debug ID and CID
 * are recorded as 0 and the usage as the sentinel -2. */
TRACE_EVENT(rxrpc_client,
	    TP_PROTO(struct rxrpc_connection *conn, int channel,
		     enum rxrpc_client_trace op),

	    TP_ARGS(conn, channel, op),

	    TP_STRUCT__entry(
		    __field(unsigned int,		conn)
		    __field(u32,			cid)
		    __field(int,			channel)
		    __field(int,			usage)
		    __field(enum rxrpc_client_trace,	op)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn ? conn->debug_id : 0;
		    __entry->channel = channel;
		    /* -2 is a sentinel distinguishing "no conn" from real counts */
		    __entry->usage = conn ? refcount_read(&conn->ref) : -2;
		    __entry->op = op;
		    __entry->cid = conn ? conn->proto.cid : 0;
			   ),

	    TP_printk("C=%08x h=%2d %s i=%08x u=%d",
		      __entry->conn,
		      __entry->channel,
		      __print_symbolic(__entry->op, rxrpc_client_traces),
		      __entry->cid,
		      __entry->usage)
	    );
|
|
|
|
|
2016-08-30 09:49:29 +01:00
|
|
|
/* Trace a call refcount event: logs the call debug ID, the refcount, an
 * auxiliary value supplied by the caller and the reason (decoded via
 * rxrpc_call_traces). */
TRACE_EVENT(rxrpc_call,
	    TP_PROTO(unsigned int call_debug_id, int ref, unsigned long aux,
		     enum rxrpc_call_trace why),

	    TP_ARGS(call_debug_id, ref, aux, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(int,		ref)
		    __field(int,		why)
		    __field(unsigned long,	aux)
			     ),

	    TP_fast_assign(
		    __entry->call = call_debug_id;
		    __entry->ref = ref;
		    __entry->why = why;
		    __entry->aux = aux;
			   ),

	    TP_printk("c=%08x %s r=%d a=%lx",
		      __entry->call,
		      __print_symbolic(__entry->why, rxrpc_call_traces),
		      __entry->ref,
		      __entry->aux)
	    );
|
|
|
|
|
2016-08-23 15:27:24 +01:00
|
|
|
/* Trace socket-buffer accounting: logs the skb pointer, its usage count, a
 * module-level count and the reason (decoded via rxrpc_skb_traces). */
TRACE_EVENT(rxrpc_skb,
	    TP_PROTO(struct sk_buff *skb, int usage, int mod_count,
		     enum rxrpc_skb_trace why),

	    TP_ARGS(skb, usage, mod_count, why),

	    TP_STRUCT__entry(
		    __field(struct sk_buff *,		skb)
		    __field(int,			usage)
		    __field(int,			mod_count)
		    __field(enum rxrpc_skb_trace,	why)
			     ),

	    TP_fast_assign(
		    __entry->skb = skb;
		    __entry->usage = usage;
		    __entry->mod_count = mod_count;
		    __entry->why = why;
			   ),

	    TP_printk("s=%p Rx %s u=%d m=%d",
		      __entry->skb,
		      __print_symbolic(__entry->why, rxrpc_skb_traces),
		      __entry->usage,
		      __entry->mod_count)
	    );
|
|
|
|
|
2016-09-08 11:10:12 +01:00
|
|
|
/* Trace a received packet: snapshots the whole decoded wire header from the
 * skb private data and prints its epoch/cid/call/service/serial/seq fields,
 * with the packet type decoded via rxrpc_pkts. */
TRACE_EVENT(rxrpc_rx_packet,
	    TP_PROTO(struct rxrpc_skb_priv *sp),

	    TP_ARGS(sp),

	    TP_STRUCT__entry(
		    __field_struct(struct rxrpc_host_header,	hdr)
			     ),

	    TP_fast_assign(
		    memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
			   ),

	    TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
		      __entry->hdr.epoch, __entry->hdr.cid,
		      __entry->hdr.callNumber, __entry->hdr.serviceId,
		      __entry->hdr.serial, __entry->hdr.seq,
		      __entry->hdr.securityIndex, __entry->hdr.flags,
		      __print_symbolic(__entry->hdr.type, rxrpc_pkts))
	    );
|
|
|
|
|
|
|
|
/* Trace completion of receive processing: logs the caller-supplied result
 * code and abort code. */
TRACE_EVENT(rxrpc_rx_done,
	    TP_PROTO(int result, int abort_code),

	    TP_ARGS(result, abort_code),

	    TP_STRUCT__entry(
		    __field(int,	result)
		    __field(int,	abort_code)
			     ),

	    TP_fast_assign(
		    __entry->result = result;
		    __entry->abort_code = abort_code;
			   ),

	    TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
	    );
|
|
|
|
|
2024-11-06 13:00:45 +00:00
|
|
|
/* Trace a locally initiated call abort: logs the call debug ID, the abort
 * code passed in, and the reason/error stashed on the call by the aborting
 * path (call->send_abort_why / call->send_abort_err). */
TRACE_EVENT(rxrpc_abort_call,
	    TP_PROTO(const struct rxrpc_call *call, int abort_code),

	    TP_ARGS(call, abort_code),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call_nr)
		    __field(enum rxrpc_abort_reason,	why)
		    __field(int,			abort_code)
		    __field(int,			error)
			     ),

	    TP_fast_assign(
		    __entry->call_nr = call->debug_id;
		    __entry->why = call->send_abort_why;
		    __entry->abort_code = abort_code;
		    __entry->error = call->send_abort_err;
			   ),

	    TP_printk("c=%08x a=%d e=%d %s",
		      __entry->call_nr,
		      __entry->abort_code, __entry->error,
		      __print_symbolic(__entry->why, rxrpc_abort_reasons))
	    );
|
|
|
|
|
2016-09-06 22:19:51 +01:00
|
|
|
/* Trace an abort with full protocol context: call debug ID, reason (decoded
 * via rxrpc_abort_reasons), connection/call IDs, the sequence number at which
 * the abort applies, the abort code and the local error. */
TRACE_EVENT(rxrpc_abort,
	    TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
		     u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),

	    TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call_nr)
		    __field(enum rxrpc_abort_reason,	why)
		    __field(u32,			cid)
		    __field(u32,			call_id)
		    __field(rxrpc_seq_t,		seq)
		    __field(int,			abort_code)
		    __field(int,			error)
			     ),

	    TP_fast_assign(
		    __entry->call_nr = call_nr;
		    __entry->why = why;
		    __entry->cid = cid;
		    __entry->call_id = call_id;
		    __entry->abort_code = abort_code;
		    __entry->error = error;
		    __entry->seq = seq;
			   ),

	    TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
		      __entry->call_nr,
		      __entry->cid, __entry->call_id, __entry->seq,
		      __entry->abort_code, __entry->error,
		      __print_symbolic(__entry->why, rxrpc_abort_reasons))
	    );
|
|
|
|
|
2018-03-27 23:08:20 +01:00
|
|
|
/* Trace call completion: logs the call debug ID plus the completion state
 * (decoded via rxrpc_completions), error and abort code recorded on the
 * call. */
TRACE_EVENT(rxrpc_call_complete,
	    TP_PROTO(struct rxrpc_call *call),

	    TP_ARGS(call),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(enum rxrpc_call_completion,	compl)
		    __field(int,			error)
		    __field(u32,			abort_code)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->compl = call->completion;
		    __entry->error = call->error;
		    __entry->abort_code = call->abort_code;
			   ),

	    TP_printk("c=%08x %s r=%d ac=%d",
		      __entry->call,
		      __print_symbolic(__entry->compl, rxrpc_completions),
		      __entry->error,
		      __entry->abort_code)
	    );
|
|
|
|
|
2022-03-31 23:55:08 +01:00
|
|
|
/* Trace the state of a call's transmit queue: snapshots tx_bottom,
 * acks_hard_ack, tx_top, send_top and the window size, and prints the
 * derived segment lengths (hard-acked / in-flight / prepared-but-unsent). */
TRACE_EVENT(rxrpc_txqueue,
	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_txqueue_trace why),

	    TP_ARGS(call, why),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(enum rxrpc_txqueue_trace,	why)
		    __field(rxrpc_seq_t,		tx_bottom)
		    __field(rxrpc_seq_t,		acks_hard_ack)
		    __field(rxrpc_seq_t,		tx_top)
		    __field(rxrpc_seq_t,		send_top)
		    __field(int,			tx_winsize)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->why = why;
		    __entry->tx_bottom = call->tx_bottom;
		    __entry->acks_hard_ack = call->acks_hard_ack;
		    __entry->tx_top = call->tx_top;
		    __entry->send_top = call->send_top;
		    __entry->tx_winsize = call->tx_winsize;
			   ),

	    TP_printk("c=%08x %s b=%08x h=%08x n=%u/%u/%u/%u",
		      __entry->call,
		      __print_symbolic(__entry->why, rxrpc_txqueue_traces),
		      __entry->tx_bottom,
		      __entry->acks_hard_ack,
		      __entry->acks_hard_ack - __entry->tx_bottom,
		      __entry->tx_top - __entry->acks_hard_ack,
		      __entry->send_top - __entry->tx_top,
		      __entry->tx_winsize)
	    );
|
|
|
|
|
2024-12-04 07:46:40 +00:00
|
|
|
/* Trace a transmission decision on a call: records the next sequence to
 * transmit (tx_top + 1), the space available, window/congestion parameters,
 * counts of prepared (send_top - tx_bottom) and in-flight
 * (tx_top - tx_bottom) packets, and the peer's PMTUD jumbo setting. */
TRACE_EVENT(rxrpc_transmit,
	    TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t send_top, int space),

	    TP_ARGS(call, send_top, space),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_seq_t,	seq)
		    __field(u16,		space)
		    __field(u16,		tx_winsize)
		    __field(u16,		cong_cwnd)
		    __field(u16,		cong_extra)
		    __field(u16,		in_flight)
		    __field(u16,		prepared)
		    __field(u16,		pmtud_jumbo)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->seq = call->tx_top + 1;
		    __entry->space = space;
		    __entry->tx_winsize = call->tx_winsize;
		    __entry->cong_cwnd = call->cong_cwnd;
		    __entry->cong_extra = call->cong_extra;
		    __entry->prepared = send_top - call->tx_bottom;
		    __entry->in_flight = call->tx_top - call->tx_bottom;
		    __entry->pmtud_jumbo = call->peer->pmtud_jumbo;
			   ),

	    TP_printk("c=%08x q=%08x sp=%u tw=%u cw=%u+%u pr=%u if=%u pj=%u",
		      __entry->call,
		      __entry->seq,
		      __entry->space,
		      __entry->tx_winsize,
		      __entry->cong_cwnd,
		      __entry->cong_extra,
		      __entry->prepared,
		      __entry->in_flight,
		      __entry->pmtud_jumbo)
	    );
|
|
|
|
|
2024-12-04 07:46:46 +00:00
|
|
|
/* Trace rotation of the transmit queue: logs the starting sequence, the
 * target sequence and the current tx_top on the call. */
TRACE_EVENT(rxrpc_tx_rotate,
	    TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_seq_t to),

	    TP_ARGS(call, seq, to),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_seq_t,	seq)
		    __field(rxrpc_seq_t,	to)
		    __field(rxrpc_seq_t,	top)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->seq = seq;
		    __entry->to = to;
		    __entry->top = call->tx_top;
			   ),

	    TP_printk("c=%08x q=%08x-%08x-%08x",
		      __entry->call,
		      __entry->seq,
		      __entry->to,
		      __entry->top)
	    );
|
|
|
|
|
2017-01-05 10:38:34 +00:00
|
|
|
/* Trace receipt of a DATA packet: logs the call debug ID, the packet's
 * serial and sequence numbers and its header flags.  Note the printk emits
 * serial before seq. */
TRACE_EVENT(rxrpc_rx_data,
	    TP_PROTO(unsigned int call, rxrpc_seq_t seq,
		     rxrpc_serial_t serial, u8 flags),

	    TP_ARGS(call, seq, serial, flags),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_seq_t,	seq)
		    __field(rxrpc_serial_t,	serial)
		    __field(u8,			flags)
			     ),

	    TP_fast_assign(
		    __entry->call = call;
		    __entry->seq = seq;
		    __entry->serial = serial;
		    __entry->flags = flags;
			   ),

	    TP_printk("c=%08x DATA %08x q=%08x fl=%02x",
		      __entry->call,
		      __entry->serial,
		      __entry->seq,
		      __entry->flags)
	    );
|
|
|
|
|
2016-09-17 10:49:13 +01:00
|
|
|
/* Trace receipt of an ACK packet: pulls the serial and userStatus from the
 * wire header and the acked-serial/first/prev/reason/nr_acks fields from the
 * parsed ACK info in the skb private data. */
TRACE_EVENT(rxrpc_rx_ack,
	    TP_PROTO(struct rxrpc_call *call, struct rxrpc_skb_priv *sp),

	    TP_ARGS(call, sp),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_serial_t,	serial)
		    __field(rxrpc_serial_t,	ack_serial)
		    __field(rxrpc_seq_t,	first)
		    __field(rxrpc_seq_t,	prev)
		    __field(u8,			reason)
		    __field(u8,			n_acks)
		    __field(u8,			user_status)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->serial = sp->hdr.serial;
		    __entry->user_status = sp->hdr.userStatus;
		    __entry->ack_serial = sp->ack.acked_serial;
		    __entry->first = sp->ack.first_ack;
		    __entry->prev = sp->ack.prev_ack;
		    __entry->reason = sp->ack.reason;
		    __entry->n_acks = sp->ack.nr_acks;
			   ),

	    TP_printk("c=%08x %08x %s r=%08x us=%02x f=%08x p=%08x n=%u",
		      __entry->call,
		      __entry->serial,
		      __print_symbolic(__entry->reason, rxrpc_ack_names),
		      __entry->ack_serial,
		      __entry->user_status,
		      __entry->first,
		      __entry->prev,
		      __entry->n_acks)
	    );
|
|
|
|
|
2017-04-06 10:12:00 +01:00
|
|
|
/* Trace receipt of an ABORT packet on a call: logs the call debug ID, the
 * packet serial and the abort code carried by the packet. */
TRACE_EVENT(rxrpc_rx_abort,
	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
		     u32 abort_code),

	    TP_ARGS(call, serial, abort_code),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		abort_code)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->serial = serial;
		    __entry->abort_code = abort_code;
			   ),

	    TP_printk("c=%08x ABORT %08x ac=%d",
		      __entry->call,
		      __entry->serial,
		      __entry->abort_code)
	    );
|
|
|
|
|
2024-12-04 07:46:30 +00:00
|
|
|
/* Trace receipt of a connection-level ABORT: logs the connection debug ID,
 * the packet serial from the skb private data, and the abort code (which the
 * receive path stashes in skb->priority). */
TRACE_EVENT(rxrpc_rx_conn_abort,
	    TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb),

	    TP_ARGS(conn, skb),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		abort_code)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn->debug_id;
		    __entry->serial = rxrpc_skb(skb)->hdr.serial;
		    __entry->abort_code = skb->priority;
			   ),

	    TP_printk("C=%08x ABORT %08x ac=%d",
		      __entry->conn,
		      __entry->serial,
		      __entry->abort_code)
	    );
|
|
|
|
|
2025-04-11 10:52:58 +01:00
|
|
|
/* Trace transmission of a security CHALLENGE packet: logs the connection
 * debug ID, packet serial, challenge version and nonce, plus the service ID
 * and security index taken from the connection. */
TRACE_EVENT(rxrpc_tx_challenge,
	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
		     u32 version, u32 nonce),

	    TP_ARGS(conn, serial, version, nonce),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		version)
		    __field(u32,		nonce)
		    __field(u16,		service_id)
		    __field(u8,			security_ix)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn->debug_id;
		    __entry->serial = serial;
		    __entry->version = version;
		    __entry->nonce = nonce;
		    __entry->service_id = conn->service_id;
		    __entry->security_ix = conn->security_ix;
			   ),

	    TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x",
		      __entry->conn,
		      __entry->serial,
		      __entry->service_id,
		      __entry->security_ix,
		      __entry->version,
		      __entry->nonce)
	    );
|
|
|
|
|
2022-10-20 11:51:06 +01:00
|
|
|
/* Trace receipt of a security CHALLENGE packet: logs the connection debug
 * ID, packet serial, challenge version/nonce/minimum security level, plus
 * the service ID and security index taken from the connection. */
TRACE_EVENT(rxrpc_rx_challenge,
	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
		     u32 version, u32 nonce, u32 min_level),

	    TP_ARGS(conn, serial, version, nonce, min_level),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		version)
		    __field(u32,		nonce)
		    __field(u32,		min_level)
		    __field(u16,		service_id)
		    __field(u8,			security_ix)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn->debug_id;
		    __entry->serial = serial;
		    __entry->version = version;
		    __entry->nonce = nonce;
		    __entry->min_level = min_level;
		    __entry->service_id = conn->service_id;
		    __entry->security_ix = conn->security_ix;
			   ),

	    TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x ml=%x",
		      __entry->conn,
		      __entry->serial,
		      __entry->service_id,
		      __entry->security_ix,
		      __entry->version,
		      __entry->nonce,
		      __entry->min_level)
	    );
|
|
|
|
|
2025-04-11 10:52:58 +01:00
|
|
|
/* Trace transmission of a security RESPONSE packet: logs the connection
 * debug ID, packet serial, the serial of the challenge being answered, the
 * response version/kvno/ticket length, and the connection's service ID and
 * security index. */
TRACE_EVENT(rxrpc_tx_response,
	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
		     struct rxrpc_skb_priv *rsp),

	    TP_ARGS(conn, serial, rsp),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(rxrpc_serial_t,	serial)
		    __field(rxrpc_serial_t,	challenge)
		    __field(u32,		version)
		    __field(u32,		kvno)
		    __field(u16,		ticket_len)
		    /* NOTE(review): appdata_len is declared but never assigned
		     * or printed in this event - confirm whether it should be
		     * filled from rsp->resp or dropped. */
		    __field(u16,		appdata_len)
		    __field(u16,		service_id)
		    __field(u8,			security_ix)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn->debug_id;
		    __entry->serial = serial;
		    __entry->challenge = rsp->resp.challenge_serial;
		    __entry->version = rsp->resp.version;
		    __entry->kvno = rsp->resp.kvno;
		    __entry->ticket_len = rsp->resp.ticket_len;
		    __entry->service_id = conn->service_id;
		    __entry->security_ix = conn->security_ix;
			   ),

	    TP_printk("C=%08x RESPONSE r=%08x cr=%08x sv=%u+%u v=%x kv=%x tl=%u",
		      __entry->conn,
		      __entry->serial,
		      __entry->challenge,
		      __entry->service_id,
		      __entry->security_ix,
		      __entry->version,
		      __entry->kvno,
		      __entry->ticket_len)
	    );
|
|
|
|
|
2022-10-20 11:51:06 +01:00
|
|
|
/* Trace receipt of a security RESPONSE packet: logs the connection debug ID,
 * packet serial, response version/kvno/ticket length, and the connection's
 * security index. */
TRACE_EVENT(rxrpc_rx_response,
	    TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
		     u32 version, u32 kvno, u32 ticket_len),

	    TP_ARGS(conn, serial, version, kvno, ticket_len),

	    TP_STRUCT__entry(
		    __field(unsigned int,	conn)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		version)
		    __field(u32,		kvno)
		    __field(u32,		ticket_len)
		    __field(u8,			security_ix)
			     ),

	    TP_fast_assign(
		    __entry->conn = conn->debug_id;
		    __entry->serial = serial;
		    __entry->version = version;
		    __entry->kvno = kvno;
		    __entry->ticket_len = ticket_len;
		    __entry->security_ix = conn->security_ix;
			   ),

	    TP_printk("C=%08x RESPONSE r=%08x sx=%u v=%x kvno=%x tl=%x",
		      __entry->conn,
		      __entry->serial,
		      __entry->security_ix,
		      __entry->version,
		      __entry->kvno,
		      __entry->ticket_len)
	    );
|
|
|
|
|
2017-04-06 10:12:00 +01:00
|
|
|
/* Trace a receive-window change reported by the peer: logs the call debug
 * ID, the packet serial, the new rwind value and whether a waiter was (or
 * will be) woken. */
TRACE_EVENT(rxrpc_rx_rwind_change,
	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
		     u32 rwind, bool wake),

	    TP_ARGS(call, serial, rwind, wake),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(rxrpc_serial_t,	serial)
		    __field(u32,		rwind)
		    __field(bool,		wake)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->serial = serial;
		    __entry->rwind = rwind;
		    __entry->wake = wake;
			   ),

	    TP_printk("c=%08x %08x rw=%u%s",
		      __entry->call,
		      __entry->serial,
		      __entry->rwind,
		      __entry->wake ? " wake" : "")
	    );
|
|
|
|
|
2018-07-23 17:18:37 +01:00
|
|
|
/* Trace transmission of a packet: snapshots the whole wire header (fields in
 * network byte order, converted with ntoh* for display) plus the code point
 * that sent it (decoded via rxrpc_tx_points).  Unknown packet types (> 15)
 * are printed as "?UNK" rather than indexing rxrpc_pkts out of range. */
TRACE_EVENT(rxrpc_tx_packet,
	    TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr,
		     enum rxrpc_tx_point where),

	    TP_ARGS(call_id, whdr, where),

	    TP_STRUCT__entry(
		    __field(unsigned int,			call)
		    __field(enum rxrpc_tx_point,		where)
		    __field_struct(struct rxrpc_wire_header,	whdr)
			     ),

	    TP_fast_assign(
		    __entry->call = call_id;
		    memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
		    __entry->where = where;
			   ),

	    TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
		      __entry->call,
		      ntohl(__entry->whdr.epoch),
		      ntohl(__entry->whdr.cid),
		      ntohl(__entry->whdr.callNumber),
		      ntohs(__entry->whdr.serviceId),
		      ntohl(__entry->whdr.serial),
		      ntohl(__entry->whdr.seq),
		      __entry->whdr.type, __entry->whdr.flags,
		      __entry->whdr.type <= 15 ?
		      __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK",
		      __print_symbolic(__entry->where, rxrpc_tx_points))
	    );
|
|
|
|
|
2016-09-23 12:39:22 +01:00
|
|
|
TRACE_EVENT(rxrpc_tx_data,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
|
2024-12-04 07:47:03 +00:00
|
|
|
rxrpc_serial_t serial, unsigned int flags,
|
|
|
|
enum rxrpc_txdata_trace trace),
|
2016-09-23 12:39:22 +01:00
|
|
|
|
2024-12-04 07:47:03 +00:00
|
|
|
TP_ARGS(call, seq, serial, flags, trace),
|
2016-09-23 12:39:22 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(u32, cid)
|
|
|
|
__field(u32, call_id)
|
2024-01-29 15:01:10 +00:00
|
|
|
__field(u16, flags)
|
2024-12-04 07:47:03 +00:00
|
|
|
__field(enum rxrpc_txdata_trace, trace)
|
2016-09-23 12:39:22 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2018-07-23 17:18:37 +01:00
|
|
|
__entry->cid = call->cid;
|
|
|
|
__entry->call_id = call->call_id;
|
2016-09-23 12:39:22 +01:00
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->flags = flags;
|
2024-12-04 07:47:03 +00:00
|
|
|
__entry->trace = trace;
|
2016-09-23 12:39:22 +01:00
|
|
|
),
|
|
|
|
|
2024-12-04 07:47:03 +00:00
|
|
|
TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s",
|
2016-09-23 12:39:22 +01:00
|
|
|
__entry->call,
|
2018-07-23 17:18:37 +01:00
|
|
|
__entry->cid,
|
|
|
|
__entry->call_id,
|
2016-09-23 12:39:22 +01:00
|
|
|
__entry->serial,
|
|
|
|
__entry->seq,
|
2024-01-29 15:01:10 +00:00
|
|
|
__entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
|
2024-12-04 07:47:03 +00:00
|
|
|
__print_symbolic(__entry->trace, rxrpc_txdata_traces))
|
2016-09-23 12:39:22 +01:00
|
|
|
);
|
|
|
|
|
2016-09-17 10:49:13 +01:00
|
|
|
TRACE_EVENT(rxrpc_tx_ack,
|
2018-07-23 17:18:37 +01:00
|
|
|
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
|
2016-09-23 12:39:22 +01:00
|
|
|
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
|
2024-12-04 07:47:04 +00:00
|
|
|
u8 reason, u8 n_acks, u16 rwind,
|
|
|
|
enum rxrpc_propose_ack_trace trace),
|
2016-09-17 10:49:13 +01:00
|
|
|
|
2024-12-04 07:47:04 +00:00
|
|
|
TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind, trace),
|
2016-09-17 10:49:13 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(rxrpc_seq_t, ack_first)
|
|
|
|
__field(rxrpc_serial_t, ack_serial)
|
|
|
|
__field(u8, reason)
|
|
|
|
__field(u8, n_acks)
|
2023-01-31 15:31:49 +00:00
|
|
|
__field(u16, rwind)
|
2024-12-04 07:47:04 +00:00
|
|
|
__field(enum rxrpc_propose_ack_trace, trace)
|
2016-09-17 10:49:13 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-07-23 17:18:37 +01:00
|
|
|
__entry->call = call;
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->serial = serial;
|
2016-09-23 12:39:22 +01:00
|
|
|
__entry->ack_first = ack_first;
|
|
|
|
__entry->ack_serial = ack_serial;
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->reason = reason;
|
|
|
|
__entry->n_acks = n_acks;
|
2023-01-31 15:31:49 +00:00
|
|
|
__entry->rwind = rwind;
|
2024-12-04 07:47:04 +00:00
|
|
|
__entry->trace = trace;
|
2016-09-17 10:49:13 +01:00
|
|
|
),
|
|
|
|
|
2024-12-04 07:47:04 +00:00
|
|
|
TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u %s",
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->call,
|
|
|
|
__entry->serial,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->reason, rxrpc_ack_names),
|
2016-09-23 12:39:22 +01:00
|
|
|
__entry->ack_first,
|
|
|
|
__entry->ack_serial,
|
2023-01-31 15:31:49 +00:00
|
|
|
__entry->n_acks,
|
2024-12-04 07:47:04 +00:00
|
|
|
__entry->rwind,
|
|
|
|
__print_symbolic(__entry->trace, rxrpc_propose_ack_traces))
|
2016-09-17 10:49:13 +01:00
|
|
|
);
|
|
|
|
|
2016-09-17 10:49:13 +01:00
|
|
|
TRACE_EVENT(rxrpc_receive,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
|
|
|
|
rxrpc_serial_t serial, rxrpc_seq_t seq),
|
|
|
|
|
|
|
|
TP_ARGS(call, why, serial, seq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_receive_trace, why)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
2022-10-17 11:44:22 +01:00
|
|
|
__field(rxrpc_seq_t, window)
|
|
|
|
__field(rxrpc_seq_t, wtop)
|
2016-09-17 10:49:13 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->why = why;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->seq = seq;
|
2022-10-17 11:44:22 +01:00
|
|
|
__entry->window = call->ackr_window;
|
|
|
|
__entry->wtop = call->ackr_wtop;
|
2016-09-17 10:49:13 +01:00
|
|
|
),
|
|
|
|
|
2018-03-27 23:03:00 +01:00
|
|
|
TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->call,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_receive_traces),
|
2016-09-17 10:49:13 +01:00
|
|
|
__entry->serial,
|
|
|
|
__entry->seq,
|
2022-10-17 11:44:22 +01:00
|
|
|
__entry->window,
|
|
|
|
__entry->wtop)
|
2016-09-17 10:49:13 +01:00
|
|
|
);
|
|
|
|
|
2016-09-17 11:13:31 +01:00
|
|
|
TRACE_EVENT(rxrpc_recvmsg,
|
2022-12-24 14:49:00 +00:00
|
|
|
TP_PROTO(unsigned int call_debug_id, enum rxrpc_recvmsg_trace why,
|
2022-10-07 17:22:40 +01:00
|
|
|
int ret),
|
|
|
|
|
2022-12-24 14:49:00 +00:00
|
|
|
TP_ARGS(call_debug_id, why, ret),
|
2022-10-07 17:22:40 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_recvmsg_trace, why)
|
|
|
|
__field(int, ret)
|
2022-10-07 17:22:40 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-12-24 14:49:00 +00:00
|
|
|
__entry->call = call_debug_id;
|
2022-10-07 17:22:40 +01:00
|
|
|
__entry->why = why;
|
|
|
|
__entry->ret = ret;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s ret=%d",
|
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_recvmsg_traces),
|
|
|
|
__entry->ret)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_recvdata,
|
2016-09-17 11:13:31 +01:00
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
|
|
|
|
rxrpc_seq_t seq, unsigned int offset, unsigned int len,
|
|
|
|
int ret),
|
|
|
|
|
|
|
|
TP_ARGS(call, why, seq, offset, len, ret),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_recvmsg_trace, why)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(unsigned int, offset)
|
|
|
|
__field(unsigned int, len)
|
|
|
|
__field(int, ret)
|
2016-09-17 11:13:31 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2019-10-03 17:44:44 +01:00
|
|
|
__entry->call = call ? call->debug_id : 0;
|
2016-09-17 11:13:31 +01:00
|
|
|
__entry->why = why;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->offset = offset;
|
|
|
|
__entry->len = len;
|
|
|
|
__entry->ret = ret;
|
|
|
|
),
|
|
|
|
|
2018-03-27 23:03:00 +01:00
|
|
|
TP_printk("c=%08x %s q=%08x o=%u l=%u ret=%d",
|
2016-09-17 11:13:31 +01:00
|
|
|
__entry->call,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_recvmsg_traces),
|
2016-09-17 11:13:31 +01:00
|
|
|
__entry->seq,
|
|
|
|
__entry->offset,
|
|
|
|
__entry->len,
|
|
|
|
__entry->ret)
|
|
|
|
);
|
|
|
|
|
2016-09-22 00:41:53 +01:00
|
|
|
TRACE_EVENT(rxrpc_rtt_tx,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
int slot, rxrpc_serial_t send_serial),
|
2016-09-22 00:41:53 +01:00
|
|
|
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
TP_ARGS(call, why, slot, send_serial),
|
2016-09-22 00:41:53 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_rtt_tx_trace, why)
|
|
|
|
__field(int, slot)
|
|
|
|
__field(rxrpc_serial_t, send_serial)
|
2016-09-22 00:41:53 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->why = why;
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
__entry->slot = slot;
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->send_serial = send_serial;
|
|
|
|
),
|
|
|
|
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
TP_printk("c=%08x [%d] %s sr=%08x",
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->call,
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
__entry->slot,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->send_serial)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rtt_rx,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
int slot,
|
2016-09-22 00:41:53 +01:00
|
|
|
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
|
2024-12-04 07:46:55 +00:00
|
|
|
u32 rtt, u32 srtt, u32 rto),
|
2016-09-22 00:41:53 +01:00
|
|
|
|
2024-12-04 07:46:55 +00:00
|
|
|
TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, srtt, rto),
|
2016-09-22 00:41:53 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_rtt_rx_trace, why)
|
|
|
|
__field(int, slot)
|
|
|
|
__field(rxrpc_serial_t, send_serial)
|
|
|
|
__field(rxrpc_serial_t, resp_serial)
|
|
|
|
__field(u32, rtt)
|
2024-12-04 07:46:55 +00:00
|
|
|
__field(u32, srtt)
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(u32, rto)
|
2024-12-04 07:46:55 +00:00
|
|
|
__field(u32, min_rtt)
|
2016-09-22 00:41:53 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->why = why;
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
__entry->slot = slot;
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->send_serial = send_serial;
|
|
|
|
__entry->resp_serial = resp_serial;
|
|
|
|
__entry->rtt = rtt;
|
2024-12-04 07:46:55 +00:00
|
|
|
__entry->srtt = srtt;
|
2020-05-11 14:54:34 +01:00
|
|
|
__entry->rto = rto;
|
2024-12-04 07:47:05 +00:00
|
|
|
__entry->min_rtt = minmax_get(&call->min_rtt)
|
2016-09-22 00:41:53 +01:00
|
|
|
),
|
|
|
|
|
2024-12-04 07:46:55 +00:00
|
|
|
TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u srtt=%u rto=%u min=%u",
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->call,
|
rxrpc: Fix loss of RTT samples due to interposed ACK
The Rx protocol has a mechanism to help generate RTT samples that works by
a client transmitting a REQUESTED-type ACK when it receives a DATA packet
that has the REQUEST_ACK flag set.
The peer, however, may interpose other ACKs before transmitting the
REQUESTED-ACK, as can be seen in the following trace excerpt:
rxrpc_tx_data: c=00000044 DATA d0b5ece8:00000001 00000001 q=00000001 fl=07
rxrpc_rx_ack: c=00000044 00000001 PNG r=00000000 f=00000002 p=00000000 n=0
rxrpc_rx_ack: c=00000044 00000002 REQ r=00000001 f=00000002 p=00000001 n=0
...
DATA packet 1 (q=xx) has REQUEST_ACK set (bit 1 of fl=xx). The incoming
ping (labelled PNG) hard-acks the request DATA packet (f=xx exceeds the
sequence number of the DATA packet), causing it to be discarded from the Tx
ring. The ACK that was requested (labelled REQ, r=xx references the serial
of the DATA packet) comes after the ping, but the sk_buff holding the
timestamp has gone and the RTT sample is lost.
This is particularly noticeable on RPC calls used to probe the service
offered by the peer. A lot of peers end up with an unknown RTT because we
only ever sent a single RPC. This confuses the server rotation algorithm.
Fix this by caching the information about the outgoing packet in RTT
calculations in the rxrpc_call struct rather than looking in the Tx ring.
A four-deep buffer is maintained and both REQUEST_ACK-flagged DATA and
PING-ACK transmissions are recorded in there. When the appropriate
response ACK is received, the buffer is checked for a match and, if found,
an RTT sample is recorded.
If a received ACK refers to a packet with a later serial number than an
entry in the cache, that entry is presumed lost and the entry is made
available to record a new transmission.
ACKs types other than REQUESTED-type and PING-type cause any matching
sample to be cancelled as they don't necessarily represent a useful
measurement.
If there's no space in the buffer on ping/data transmission, the sample
base is discarded.
Fixes: 50235c4b5a2f ("rxrpc: Obtain RTT data by requesting ACKs on DATA packets")
Signed-off-by: David Howells <dhowells@redhat.com>
2020-08-19 23:29:16 +01:00
|
|
|
__entry->slot,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
|
2016-09-22 00:41:53 +01:00
|
|
|
__entry->send_serial,
|
|
|
|
__entry->resp_serial,
|
|
|
|
__entry->rtt,
|
2024-12-04 07:46:55 +00:00
|
|
|
__entry->srtt / 8,
|
|
|
|
__entry->rto,
|
|
|
|
__entry->min_rtt)
|
2016-09-22 00:41:53 +01:00
|
|
|
);
|
|
|
|
|
2024-01-30 21:37:16 +00:00
|
|
|
TRACE_EVENT(rxrpc_timer_set,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t delay,
|
|
|
|
enum rxrpc_timer_trace why),
|
2016-09-23 15:22:36 +01:00
|
|
|
|
2024-01-30 21:37:16 +00:00
|
|
|
TP_ARGS(call, delay, why),
|
2016-09-23 15:22:36 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_timer_trace, why)
|
2024-01-30 21:37:16 +00:00
|
|
|
__field(ktime_t, delay)
|
2016-09-23 15:22:36 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-11-24 10:18:41 +00:00
|
|
|
__entry->why = why;
|
2024-01-30 21:37:16 +00:00
|
|
|
__entry->delay = delay;
|
2016-09-23 15:22:36 +01:00
|
|
|
),
|
|
|
|
|
2024-01-30 21:37:16 +00:00
|
|
|
TP_printk("c=%08x %s to=%lld",
|
2016-09-23 15:22:36 +01:00
|
|
|
__entry->call,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_timer_traces),
|
2024-01-30 21:37:16 +00:00
|
|
|
ktime_to_us(__entry->delay))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_timer_exp,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t delay,
|
|
|
|
enum rxrpc_timer_trace why),
|
|
|
|
|
|
|
|
TP_ARGS(call, delay, why),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_timer_trace, why)
|
|
|
|
__field(ktime_t, delay)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->why = why;
|
|
|
|
__entry->delay = delay;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s to=%lld",
|
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_timer_traces),
|
|
|
|
ktime_to_us(__entry->delay))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_timer_can,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why),
|
|
|
|
|
|
|
|
TP_ARGS(call, why),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_timer_trace, why)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->why = why;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s",
|
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_timer_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_timer_restart,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t delay, unsigned long delayj),
|
|
|
|
|
|
|
|
TP_ARGS(call, delay, delayj),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned long, delayj)
|
|
|
|
__field(ktime_t, delay)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->delayj = delayj;
|
|
|
|
__entry->delay = delay;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x to=%lld j=%ld",
|
|
|
|
__entry->call,
|
|
|
|
ktime_to_us(__entry->delay),
|
|
|
|
__entry->delayj)
|
2016-09-23 15:22:36 +01:00
|
|
|
);
|
|
|
|
|
2022-04-22 00:20:49 +01:00
|
|
|
TRACE_EVENT(rxrpc_timer_expired,
|
2024-01-30 21:37:16 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
2022-04-22 00:20:49 +01:00
|
|
|
|
2024-01-30 21:37:16 +00:00
|
|
|
TP_ARGS(call),
|
2022-04-22 00:20:49 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
2022-04-22 00:20:49 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
),
|
|
|
|
|
2024-01-30 21:37:16 +00:00
|
|
|
TP_printk("c=%08x EXPIRED",
|
|
|
|
__entry->call)
|
2022-04-22 00:20:49 +01:00
|
|
|
);
|
|
|
|
|
2016-09-23 12:39:22 +01:00
|
|
|
TRACE_EVENT(rxrpc_rx_lose,
|
|
|
|
TP_PROTO(struct rxrpc_skb_priv *sp),
|
|
|
|
|
|
|
|
TP_ARGS(sp),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field_struct(struct rxrpc_host_header, hdr)
|
2016-09-23 12:39:22 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
|
|
|
|
__entry->hdr.epoch, __entry->hdr.cid,
|
|
|
|
__entry->hdr.callNumber, __entry->hdr.serviceId,
|
|
|
|
__entry->hdr.serial, __entry->hdr.seq,
|
|
|
|
__entry->hdr.type, __entry->hdr.flags,
|
2017-01-05 10:38:33 +00:00
|
|
|
__entry->hdr.type <= 15 ?
|
|
|
|
__print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
|
2016-09-23 12:39:22 +01:00
|
|
|
);
|
|
|
|
|
2016-09-23 13:50:40 +01:00
|
|
|
TRACE_EVENT(rxrpc_propose_ack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
|
2020-01-30 21:48:14 +00:00
|
|
|
u8 ack_reason, rxrpc_serial_t serial),
|
2016-09-23 13:50:40 +01:00
|
|
|
|
2020-01-30 21:48:14 +00:00
|
|
|
TP_ARGS(call, why, ack_reason, serial),
|
2016-09-23 13:50:40 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_propose_ack_trace, why)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(u8, ack_reason)
|
2016-09-23 13:50:40 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2016-09-23 13:50:40 +01:00
|
|
|
__entry->why = why;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->ack_reason = ack_reason;
|
|
|
|
),
|
|
|
|
|
2020-01-30 21:48:14 +00:00
|
|
|
TP_printk("c=%08x %s %s r=%08x",
|
2016-09-23 13:50:40 +01:00
|
|
|
__entry->call,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
|
|
|
|
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
|
2020-01-30 21:48:14 +00:00
|
|
|
__entry->serial)
|
2016-09-23 13:50:40 +01:00
|
|
|
);
|
|
|
|
|
2020-01-30 21:48:13 +00:00
|
|
|
TRACE_EVENT(rxrpc_send_ack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
|
|
|
|
u8 ack_reason, rxrpc_serial_t serial),
|
|
|
|
|
|
|
|
TP_ARGS(call, why, ack_reason, serial),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_propose_ack_trace, why)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(u8, ack_reason)
|
2020-01-30 21:48:13 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->why = why;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->ack_reason = ack_reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s %s r=%08x",
|
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
|
|
|
|
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
|
|
|
|
__entry->serial)
|
|
|
|
);
|
|
|
|
|
2020-01-30 21:48:14 +00:00
|
|
|
TRACE_EVENT(rxrpc_drop_ack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
|
|
|
|
u8 ack_reason, rxrpc_serial_t serial, bool nobuf),
|
|
|
|
|
|
|
|
TP_ARGS(call, why, ack_reason, serial, nobuf),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum rxrpc_propose_ack_trace, why)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(u8, ack_reason)
|
|
|
|
__field(bool, nobuf)
|
2020-01-30 21:48:14 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->why = why;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->ack_reason = ack_reason;
|
|
|
|
__entry->nobuf = nobuf;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s %s r=%08x nbf=%u",
|
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
|
|
|
|
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
|
|
|
|
__entry->serial, __entry->nobuf)
|
|
|
|
);
|
|
|
|
|
2016-09-23 13:58:55 +01:00
|
|
|
TRACE_EVENT(rxrpc_retransmit,
|
2024-12-04 07:46:46 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call,
|
|
|
|
struct rxrpc_send_data_req *req,
|
2024-12-04 07:47:07 +00:00
|
|
|
struct rxrpc_txbuf *txb),
|
2016-09-23 13:58:55 +01:00
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TP_ARGS(call, req, txb),
|
2016-09-23 13:58:55 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
2024-12-04 07:46:51 +00:00
|
|
|
__field(unsigned int, qbase)
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(rxrpc_seq_t, seq)
|
2024-01-29 13:51:30 +00:00
|
|
|
__field(rxrpc_serial_t, serial)
|
2016-09-23 13:58:55 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2024-12-04 07:46:51 +00:00
|
|
|
__entry->qbase = req->tq->qbase;
|
2024-12-04 07:46:46 +00:00
|
|
|
__entry->seq = req->seq;
|
|
|
|
__entry->serial = txb->serial;
|
2016-09-23 13:58:55 +01:00
|
|
|
),
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TP_printk("c=%08x tq=%x q=%x r=%x",
|
2016-09-23 13:58:55 +01:00
|
|
|
__entry->call,
|
2024-12-04 07:46:51 +00:00
|
|
|
__entry->qbase,
|
2016-09-23 13:58:55 +01:00
|
|
|
__entry->seq,
|
2024-12-04 07:47:07 +00:00
|
|
|
__entry->serial)
|
2016-09-23 13:58:55 +01:00
|
|
|
);
|
|
|
|
|
2016-09-24 18:05:27 +01:00
|
|
|
TRACE_EVENT(rxrpc_congest,
|
2024-12-04 07:46:52 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
|
2016-09-24 18:05:27 +01:00
|
|
|
|
2024-12-04 07:46:52 +00:00
|
|
|
TP_ARGS(call, summary),
|
2016-09-24 18:05:27 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
2024-12-04 07:46:50 +00:00
|
|
|
__field(enum rxrpc_ca_state, ca_state)
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(rxrpc_seq_t, hard_ack)
|
|
|
|
__field(rxrpc_seq_t, top)
|
|
|
|
__field(rxrpc_seq_t, lowest_nak)
|
2024-12-04 07:46:50 +00:00
|
|
|
__field(u16, nr_sacks)
|
|
|
|
__field(u16, nr_snacks)
|
|
|
|
__field(u16, cwnd)
|
|
|
|
__field(u16, ssthresh)
|
|
|
|
__field(u16, cumul_acks)
|
|
|
|
__field(u16, dup_acks)
|
2023-01-31 08:35:16 +00:00
|
|
|
__field_struct(struct rxrpc_ack_summary, sum)
|
2016-09-24 18:05:27 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2024-12-04 07:46:50 +00:00
|
|
|
__entry->ca_state = call->cong_ca_state;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->hard_ack = call->acks_hard_ack;
|
2016-09-24 18:05:27 +01:00
|
|
|
__entry->top = call->tx_top;
|
|
|
|
__entry->lowest_nak = call->acks_lowest_nak;
|
2024-12-04 07:46:50 +00:00
|
|
|
__entry->nr_sacks = call->acks_nr_sacks;
|
|
|
|
__entry->nr_snacks = call->acks_nr_snacks;
|
|
|
|
__entry->cwnd = call->cong_cwnd;
|
|
|
|
__entry->ssthresh = call->cong_ssthresh;
|
|
|
|
__entry->cumul_acks = call->cong_cumul_acks;
|
|
|
|
__entry->dup_acks = call->cong_dup_acks;
|
2016-09-24 18:05:27 +01:00
|
|
|
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
|
|
|
|
),
|
|
|
|
|
2024-12-04 07:46:51 +00:00
|
|
|
TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u A=%u+%u/%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
|
2016-09-24 18:05:27 +01:00
|
|
|
__entry->call,
|
2024-12-04 07:46:52 +00:00
|
|
|
__entry->sum.acked_serial,
|
2017-01-05 10:38:33 +00:00
|
|
|
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
|
2016-09-24 18:05:27 +01:00
|
|
|
__entry->hard_ack,
|
2024-12-04 07:46:50 +00:00
|
|
|
__print_symbolic(__entry->ca_state, rxrpc_ca_states),
|
|
|
|
__entry->cwnd,
|
|
|
|
__entry->ssthresh,
|
2024-12-04 07:46:51 +00:00
|
|
|
__entry->nr_sacks, __entry->sum.nr_new_sacks,
|
|
|
|
__entry->nr_snacks, __entry->sum.nr_new_snacks,
|
|
|
|
__entry->sum.nr_new_hacks,
|
2016-09-24 18:05:27 +01:00
|
|
|
__entry->top - __entry->hard_ack,
|
2024-12-04 07:46:50 +00:00
|
|
|
__entry->cumul_acks,
|
|
|
|
__entry->dup_acks,
|
|
|
|
__entry->lowest_nak, __entry->sum.new_low_snack ? "!" : "",
|
|
|
|
__print_symbolic(__entry->sum.change, rxrpc_congest_changes),
|
2016-09-24 18:05:27 +01:00
|
|
|
__entry->sum.retrans_timeo ? " rTxTo" : "")
|
|
|
|
);
|
|
|
|
|
2022-11-11 13:47:35 +00:00
|
|
|
TRACE_EVENT(rxrpc_reset_cwnd,
|
2024-12-04 07:47:07 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t since_last_tx, ktime_t rtt),
|
2022-11-11 13:47:35 +00:00
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TP_ARGS(call, since_last_tx, rtt),
|
2022-11-11 13:47:35 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
2024-12-04 07:46:50 +00:00
|
|
|
__field(enum rxrpc_ca_state, ca_state)
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned short, cwnd)
|
|
|
|
__field(unsigned short, extra)
|
|
|
|
__field(rxrpc_seq_t, hard_ack)
|
|
|
|
__field(rxrpc_seq_t, prepared)
|
|
|
|
__field(ktime_t, since_last_tx)
|
2024-12-04 07:47:07 +00:00
|
|
|
__field(ktime_t, rtt)
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(bool, has_data)
|
2022-11-11 13:47:35 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
2024-12-04 07:46:50 +00:00
|
|
|
__entry->ca_state = call->cong_ca_state;
|
2022-11-11 13:47:35 +00:00
|
|
|
__entry->cwnd = call->cong_cwnd;
|
|
|
|
__entry->extra = call->cong_extra;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->hard_ack = call->acks_hard_ack;
|
2024-12-04 07:46:46 +00:00
|
|
|
__entry->prepared = call->send_top - call->tx_bottom;
|
2024-12-04 07:47:07 +00:00
|
|
|
__entry->since_last_tx = since_last_tx;
|
|
|
|
__entry->rtt = rtt;
|
2024-12-04 07:46:46 +00:00
|
|
|
__entry->has_data = call->tx_bottom != call->tx_top;
|
2022-11-11 13:47:35 +00:00
|
|
|
),
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu/%llu d=%u",
|
2022-11-11 13:47:35 +00:00
|
|
|
__entry->call,
|
|
|
|
__entry->hard_ack,
|
2024-12-04 07:46:50 +00:00
|
|
|
__print_symbolic(__entry->ca_state, rxrpc_ca_states),
|
2022-11-11 13:47:35 +00:00
|
|
|
__entry->cwnd,
|
|
|
|
__entry->extra,
|
|
|
|
__entry->prepared,
|
2024-12-04 07:47:07 +00:00
|
|
|
ktime_to_us(__entry->since_last_tx),
|
|
|
|
ktime_to_us(__entry->rtt),
|
2022-11-11 13:47:35 +00:00
|
|
|
__entry->has_data)
|
|
|
|
);
|
|
|
|
|
2017-01-05 10:38:34 +00:00
|
|
|
TRACE_EVENT(rxrpc_disconnect_call,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(u32, abort_code)
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->abort_code = call->abort_code;
|
|
|
|
),
|
|
|
|
|
2018-03-27 23:03:00 +01:00
|
|
|
TP_printk("c=%08x ab=%08x",
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->call,
|
|
|
|
__entry->abort_code)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_improper_term,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(u32, abort_code)
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->abort_code = call->abort_code;
|
|
|
|
),
|
|
|
|
|
2018-03-27 23:03:00 +01:00
|
|
|
TP_printk("c=%08x ab=%08x",
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->call,
|
|
|
|
__entry->abort_code)
|
|
|
|
);
|
|
|
|
|
2017-04-06 10:12:00 +01:00
|
|
|
TRACE_EVENT(rxrpc_connect_call,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned long, user_call_ID)
|
|
|
|
__field(u32, cid)
|
|
|
|
__field(u32, call_id)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
2017-04-06 10:12:00 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-04-06 10:12:00 +01:00
|
|
|
__entry->user_call_ID = call->user_call_ID;
|
|
|
|
__entry->cid = call->cid;
|
|
|
|
__entry->call_id = call->call_id;
|
2020-01-23 13:13:41 +00:00
|
|
|
__entry->srx = call->dest_srx;
|
2017-04-06 10:12:00 +01:00
|
|
|
),
|
|
|
|
|
2020-01-23 13:13:41 +00:00
|
|
|
TP_printk("c=%08x u=%p %08x:%08x dst=%pISp",
|
2017-04-06 10:12:00 +01:00
|
|
|
__entry->call,
|
|
|
|
(void *)__entry->user_call_ID,
|
|
|
|
__entry->cid,
|
2020-01-23 13:13:41 +00:00
|
|
|
__entry->call_id,
|
|
|
|
&__entry->srx.transport)
|
2017-04-06 10:12:00 +01:00
|
|
|
);
|
|
|
|
|
2024-12-04 07:46:51 +00:00
|
|
|
TRACE_EVENT(rxrpc_apply_acks,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned int, nr_rep)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(unsigned long, acks)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->qbase = tq->qbase;
|
|
|
|
__entry->acks = tq->segment_acked;
|
|
|
|
__entry->nr_rep = tq->nr_reported_acks;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x tq=%x acks=%016lx rep=%u",
|
|
|
|
__entry->call,
|
|
|
|
__entry->qbase,
|
|
|
|
__entry->acks,
|
|
|
|
__entry->nr_rep)
|
|
|
|
);
|
|
|
|
|
2018-03-27 23:02:47 +01:00
|
|
|
TRACE_EVENT(rxrpc_resend,
|
2024-12-04 07:46:51 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t ack_serial),
|
2018-03-27 23:02:47 +01:00
|
|
|
|
2024-12-04 07:46:51 +00:00
|
|
|
TP_ARGS(call, ack_serial),
|
2018-03-27 23:02:47 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(rxrpc_seq_t, transmitted)
|
|
|
|
__field(rxrpc_serial_t, ack_serial)
|
2018-03-27 23:02:47 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->seq = call->acks_hard_ack;
|
2020-01-23 13:13:41 +00:00
|
|
|
__entry->transmitted = call->tx_transmitted;
|
2024-12-04 07:46:51 +00:00
|
|
|
__entry->ack_serial = ack_serial;
|
2018-03-27 23:02:47 +01:00
|
|
|
),
|
|
|
|
|
2020-01-23 13:13:41 +00:00
|
|
|
TP_printk("c=%08x r=%x q=%x tq=%x",
|
2018-03-27 23:02:47 +01:00
|
|
|
__entry->call,
|
2020-01-23 13:13:41 +00:00
|
|
|
__entry->ack_serial,
|
|
|
|
__entry->seq,
|
|
|
|
__entry->transmitted)
|
2018-03-27 23:02:47 +01:00
|
|
|
);
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TRACE_EVENT(rxrpc_resend_lost,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, unsigned long lost),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq, lost),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(u8, nr_rep)
|
|
|
|
__field(unsigned long, lost)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->qbase = tq->qbase;
|
|
|
|
__entry->nr_rep = tq->nr_reported_acks;
|
|
|
|
__entry->lost = lost;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x tq=%x lost=%016lx nr=%u",
|
|
|
|
__entry->call,
|
|
|
|
__entry->qbase,
|
|
|
|
__entry->lost,
|
|
|
|
__entry->nr_rep)
|
|
|
|
);
|
|
|
|
|
2024-12-04 07:46:51 +00:00
|
|
|
TRACE_EVENT(rxrpc_rotate,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
|
|
|
|
struct rxrpc_ack_summary *summary, rxrpc_seq_t seq,
|
|
|
|
enum rxrpc_rotate_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq, summary, seq, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(unsigned int, nr_rep)
|
|
|
|
__field(enum rxrpc_rotate_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->qbase = tq->qbase;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->nr_rep = tq->nr_reported_acks;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x tq=%x q=%x nr=%x %s",
|
|
|
|
__entry->call,
|
|
|
|
__entry->qbase,
|
|
|
|
__entry->seq,
|
|
|
|
__entry->nr_rep,
|
|
|
|
__print_symbolic(__entry->trace, rxrpc_rotate_traces))
|
|
|
|
);
|
|
|
|
|
2018-05-10 23:26:01 +01:00
|
|
|
TRACE_EVENT(rxrpc_rx_icmp,
|
|
|
|
TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
|
|
|
|
struct sockaddr_rxrpc *srx),
|
|
|
|
|
|
|
|
TP_ARGS(peer, ee, srx),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, peer)
|
|
|
|
__field_struct(struct sock_extended_err, ee)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
2018-05-10 23:26:01 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->peer = peer->debug_id;
|
|
|
|
memcpy(&__entry->ee, ee, sizeof(__entry->ee));
|
|
|
|
memcpy(&__entry->srx, srx, sizeof(__entry->srx));
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp",
|
|
|
|
__entry->peer,
|
|
|
|
__entry->ee.ee_origin,
|
|
|
|
__entry->ee.ee_type,
|
|
|
|
__entry->ee.ee_code,
|
|
|
|
__entry->ee.ee_info,
|
|
|
|
__entry->ee.ee_data,
|
|
|
|
__entry->ee.ee_errno,
|
|
|
|
&__entry->srx.transport)
|
|
|
|
);
|
|
|
|
|
2018-05-10 23:26:01 +01:00
|
|
|
TRACE_EVENT(rxrpc_tx_fail,
|
|
|
|
TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
|
2018-07-23 17:18:37 +01:00
|
|
|
enum rxrpc_tx_point where),
|
2018-05-10 23:26:01 +01:00
|
|
|
|
2018-07-23 17:18:37 +01:00
|
|
|
TP_ARGS(debug_id, serial, ret, where),
|
2018-05-10 23:26:01 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, debug_id)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(int, ret)
|
|
|
|
__field(enum rxrpc_tx_point, where)
|
2018-05-10 23:26:01 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->debug_id = debug_id;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->ret = ret;
|
2018-07-23 17:18:37 +01:00
|
|
|
__entry->where = where;
|
2018-05-10 23:26:01 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%x ret=%d %s",
|
|
|
|
__entry->debug_id,
|
|
|
|
__entry->serial,
|
|
|
|
__entry->ret,
|
2018-07-23 17:18:37 +01:00
|
|
|
__print_symbolic(__entry->where, rxrpc_tx_points))
|
2018-05-10 23:26:01 +01:00
|
|
|
);
|
|
|
|
|
rxrpc: Fix handling of call quietly cancelled out on server
Sometimes an in-progress call will stop responding on the fileserver when
the fileserver quietly cancels the call with an internally marked abort
(RX_CALL_DEAD), without sending an ABORT to the client.
This causes the client's call to eventually expire from lack of incoming
packets directed its way, which currently leads to it being cancelled
locally with ETIME. Note that it's not currently clear as to why this
happens as it's really hard to reproduce.
The rotation policy implement by kAFS, however, doesn't differentiate
between ETIME meaning we didn't get any response from the server and ETIME
meaning the call got cancelled mid-flow. The latter leads to an oops when
fetching data as the rotation partially resets the afs_read descriptor,
which can result in a cleared page pointer being dereferenced because that
page has already been filled.
Handle this by the following means:
(1) Set a flag on a call when we receive a packet for it.
(2) Store the highest packet serial number so far received for a call
(bearing in mind this may wrap).
(3) If, when the "not received anything recently" timeout expires on a
call, we've received at least one packet for a call and the connection
as a whole has received packets more recently than that call, then
cancel the call locally with ECONNRESET rather than ETIME.
This indicates that the call was definitely in progress on the server.
(4) In kAFS, if the rotation algorithm sees ECONNRESET rather than ETIME,
don't try the next server, but rather abort the call.
This avoids the oops as we don't try to reuse the afs_read struct.
Rather, as-yet ungotten pages will be reread at a later data.
Also:
(5) Add an rxrpc tracepoint to log detection of the call being reset.
Without this, I occasionally see an oops like the following:
general protection fault: 0000 [#1] SMP PTI
...
RIP: 0010:_copy_to_iter+0x204/0x310
RSP: 0018:ffff8800cae0f828 EFLAGS: 00010206
RAX: 0000000000000560 RBX: 0000000000000560 RCX: 0000000000000560
RDX: ffff8800cae0f968 RSI: ffff8800d58b3312 RDI: 0005080000000000
RBP: ffff8800cae0f968 R08: 0000000000000560 R09: ffff8800ca00f400
R10: ffff8800c36f28d4 R11: 00000000000008c4 R12: ffff8800cae0f958
R13: 0000000000000560 R14: ffff8800d58b3312 R15: 0000000000000560
FS: 00007fdaef108080(0000) GS:ffff8800ca680000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb28a8fa000 CR3: 00000000d2a76002 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x14e/0x289
rxrpc_recvmsg_data.isra.0+0x6f3/0xf68
? trace_buffer_unlock_commit_regs+0x4f/0x89
rxrpc_kernel_recv_data+0x149/0x421
afs_extract_data+0x1e0/0x798
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_deliver_fs_fetch_data+0x33a/0x5ab
afs_deliver_to_call+0x1ee/0x5e0
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_wait_for_call_to_complete+0x12b/0x52e
? wake_up_q+0x54/0x54
afs_make_call+0x287/0x462
? afs_fs_fetch_data+0x3e6/0x3ed
? rcu_read_lock_sched_held+0x5d/0x63
afs_fs_fetch_data+0x3e6/0x3ed
afs_fetch_data+0xbb/0x14a
afs_readpages+0x317/0x40d
__do_page_cache_readahead+0x203/0x2ba
? ondemand_readahead+0x3a7/0x3c1
ondemand_readahead+0x3a7/0x3c1
generic_file_buffered_read+0x18b/0x62f
__vfs_read+0xdb/0xfe
vfs_read+0xb2/0x137
ksys_read+0x50/0x8c
do_syscall_64+0x7d/0x1a0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Note the weird value in RDI which is a result of trying to kmap() a NULL
page pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-06-03 02:17:39 +01:00
|
|
|
TRACE_EVENT(rxrpc_call_reset,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, debug_id)
|
|
|
|
__field(u32, cid)
|
|
|
|
__field(u32, call_id)
|
|
|
|
__field(rxrpc_serial_t, call_serial)
|
|
|
|
__field(rxrpc_serial_t, conn_serial)
|
|
|
|
__field(rxrpc_seq_t, tx_seq)
|
|
|
|
__field(rxrpc_seq_t, rx_seq)
|
rxrpc: Fix handling of call quietly cancelled out on server
Sometimes an in-progress call will stop responding on the fileserver when
the fileserver quietly cancels the call with an internally marked abort
(RX_CALL_DEAD), without sending an ABORT to the client.
This causes the client's call to eventually expire from lack of incoming
packets directed its way, which currently leads to it being cancelled
locally with ETIME. Note that it's not currently clear as to why this
happens as it's really hard to reproduce.
The rotation policy implement by kAFS, however, doesn't differentiate
between ETIME meaning we didn't get any response from the server and ETIME
meaning the call got cancelled mid-flow. The latter leads to an oops when
fetching data as the rotation partially resets the afs_read descriptor,
which can result in a cleared page pointer being dereferenced because that
page has already been filled.
Handle this by the following means:
(1) Set a flag on a call when we receive a packet for it.
(2) Store the highest packet serial number so far received for a call
(bearing in mind this may wrap).
(3) If, when the "not received anything recently" timeout expires on a
call, we've received at least one packet for a call and the connection
as a whole has received packets more recently than that call, then
cancel the call locally with ECONNRESET rather than ETIME.
This indicates that the call was definitely in progress on the server.
(4) In kAFS, if the rotation algorithm sees ECONNRESET rather than ETIME,
don't try the next server, but rather abort the call.
This avoids the oops as we don't try to reuse the afs_read struct.
Rather, as-yet ungotten pages will be reread at a later data.
Also:
(5) Add an rxrpc tracepoint to log detection of the call being reset.
Without this, I occasionally see an oops like the following:
general protection fault: 0000 [#1] SMP PTI
...
RIP: 0010:_copy_to_iter+0x204/0x310
RSP: 0018:ffff8800cae0f828 EFLAGS: 00010206
RAX: 0000000000000560 RBX: 0000000000000560 RCX: 0000000000000560
RDX: ffff8800cae0f968 RSI: ffff8800d58b3312 RDI: 0005080000000000
RBP: ffff8800cae0f968 R08: 0000000000000560 R09: ffff8800ca00f400
R10: ffff8800c36f28d4 R11: 00000000000008c4 R12: ffff8800cae0f958
R13: 0000000000000560 R14: ffff8800d58b3312 R15: 0000000000000560
FS: 00007fdaef108080(0000) GS:ffff8800ca680000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb28a8fa000 CR3: 00000000d2a76002 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x14e/0x289
rxrpc_recvmsg_data.isra.0+0x6f3/0xf68
? trace_buffer_unlock_commit_regs+0x4f/0x89
rxrpc_kernel_recv_data+0x149/0x421
afs_extract_data+0x1e0/0x798
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_deliver_fs_fetch_data+0x33a/0x5ab
afs_deliver_to_call+0x1ee/0x5e0
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_wait_for_call_to_complete+0x12b/0x52e
? wake_up_q+0x54/0x54
afs_make_call+0x287/0x462
? afs_fs_fetch_data+0x3e6/0x3ed
? rcu_read_lock_sched_held+0x5d/0x63
afs_fs_fetch_data+0x3e6/0x3ed
afs_fetch_data+0xbb/0x14a
afs_readpages+0x317/0x40d
__do_page_cache_readahead+0x203/0x2ba
? ondemand_readahead+0x3a7/0x3c1
ondemand_readahead+0x3a7/0x3c1
generic_file_buffered_read+0x18b/0x62f
__vfs_read+0xdb/0xfe
vfs_read+0xb2/0x137
ksys_read+0x50/0x8c
do_syscall_64+0x7d/0x1a0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Note the weird value in RDI which is a result of trying to kmap() a NULL
page pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-06-03 02:17:39 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->debug_id = call->debug_id;
|
|
|
|
__entry->cid = call->cid;
|
|
|
|
__entry->call_id = call->call_id;
|
|
|
|
__entry->call_serial = call->rx_serial;
|
|
|
|
__entry->conn_serial = call->conn->hi_serial;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->tx_seq = call->acks_hard_ack;
|
2022-08-27 14:27:56 +01:00
|
|
|
__entry->rx_seq = call->rx_highest_seq;
|
rxrpc: Fix handling of call quietly cancelled out on server
Sometimes an in-progress call will stop responding on the fileserver when
the fileserver quietly cancels the call with an internally marked abort
(RX_CALL_DEAD), without sending an ABORT to the client.
This causes the client's call to eventually expire from lack of incoming
packets directed its way, which currently leads to it being cancelled
locally with ETIME. Note that it's not currently clear as to why this
happens as it's really hard to reproduce.
The rotation policy implement by kAFS, however, doesn't differentiate
between ETIME meaning we didn't get any response from the server and ETIME
meaning the call got cancelled mid-flow. The latter leads to an oops when
fetching data as the rotation partially resets the afs_read descriptor,
which can result in a cleared page pointer being dereferenced because that
page has already been filled.
Handle this by the following means:
(1) Set a flag on a call when we receive a packet for it.
(2) Store the highest packet serial number so far received for a call
(bearing in mind this may wrap).
(3) If, when the "not received anything recently" timeout expires on a
call, we've received at least one packet for a call and the connection
as a whole has received packets more recently than that call, then
cancel the call locally with ECONNRESET rather than ETIME.
This indicates that the call was definitely in progress on the server.
(4) In kAFS, if the rotation algorithm sees ECONNRESET rather than ETIME,
don't try the next server, but rather abort the call.
This avoids the oops as we don't try to reuse the afs_read struct.
Rather, as-yet ungotten pages will be reread at a later data.
Also:
(5) Add an rxrpc tracepoint to log detection of the call being reset.
Without this, I occasionally see an oops like the following:
general protection fault: 0000 [#1] SMP PTI
...
RIP: 0010:_copy_to_iter+0x204/0x310
RSP: 0018:ffff8800cae0f828 EFLAGS: 00010206
RAX: 0000000000000560 RBX: 0000000000000560 RCX: 0000000000000560
RDX: ffff8800cae0f968 RSI: ffff8800d58b3312 RDI: 0005080000000000
RBP: ffff8800cae0f968 R08: 0000000000000560 R09: ffff8800ca00f400
R10: ffff8800c36f28d4 R11: 00000000000008c4 R12: ffff8800cae0f958
R13: 0000000000000560 R14: ffff8800d58b3312 R15: 0000000000000560
FS: 00007fdaef108080(0000) GS:ffff8800ca680000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fb28a8fa000 CR3: 00000000d2a76002 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x14e/0x289
rxrpc_recvmsg_data.isra.0+0x6f3/0xf68
? trace_buffer_unlock_commit_regs+0x4f/0x89
rxrpc_kernel_recv_data+0x149/0x421
afs_extract_data+0x1e0/0x798
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_deliver_fs_fetch_data+0x33a/0x5ab
afs_deliver_to_call+0x1ee/0x5e0
? afs_wait_for_call_to_complete+0xc9/0x52e
afs_wait_for_call_to_complete+0x12b/0x52e
? wake_up_q+0x54/0x54
afs_make_call+0x287/0x462
? afs_fs_fetch_data+0x3e6/0x3ed
? rcu_read_lock_sched_held+0x5d/0x63
afs_fs_fetch_data+0x3e6/0x3ed
afs_fetch_data+0xbb/0x14a
afs_readpages+0x317/0x40d
__do_page_cache_readahead+0x203/0x2ba
? ondemand_readahead+0x3a7/0x3c1
ondemand_readahead+0x3a7/0x3c1
generic_file_buffered_read+0x18b/0x62f
__vfs_read+0xdb/0xfe
vfs_read+0xb2/0x137
ksys_read+0x50/0x8c
do_syscall_64+0x7d/0x1a0
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Note the weird value in RDI which is a result of trying to kmap() a NULL
page pointer.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-06-03 02:17:39 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x",
|
|
|
|
__entry->debug_id,
|
|
|
|
__entry->cid, __entry->call_id,
|
|
|
|
__entry->call_serial, __entry->conn_serial,
|
|
|
|
__entry->tx_seq, __entry->rx_seq)
|
|
|
|
);
|
|
|
|
|
2018-07-23 17:18:37 +01:00
|
|
|
TRACE_EVENT(rxrpc_notify_socket,
|
|
|
|
TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial),
|
|
|
|
|
|
|
|
TP_ARGS(debug_id, serial),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, debug_id)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
2018-07-23 17:18:37 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->debug_id = debug_id;
|
|
|
|
__entry->serial = serial;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x",
|
|
|
|
__entry->debug_id,
|
|
|
|
__entry->serial)
|
|
|
|
);
|
|
|
|
|
2020-04-28 22:06:54 +01:00
|
|
|
TRACE_EVENT(rxrpc_rx_discard_ack,
|
2024-12-04 07:46:48 +00:00
|
|
|
TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
|
|
|
|
rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt),
|
2020-04-28 22:06:54 +01:00
|
|
|
|
2024-12-04 07:46:48 +00:00
|
|
|
TP_ARGS(call, serial, hard_ack, prev_pkt),
|
2020-04-28 22:06:54 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, debug_id)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
2024-12-04 07:46:48 +00:00
|
|
|
__field(rxrpc_seq_t, hard_ack)
|
2020-04-28 22:06:54 +01:00
|
|
|
__field(rxrpc_seq_t, prev_pkt)
|
2024-12-04 07:46:48 +00:00
|
|
|
__field(rxrpc_seq_t, acks_hard_ack)
|
|
|
|
__field(rxrpc_seq_t, acks_prev_seq)
|
2020-04-28 22:06:54 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->debug_id = call->debug_id;
|
2020-04-28 22:06:54 +01:00
|
|
|
__entry->serial = serial;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->hard_ack = hard_ack;
|
2020-04-28 22:06:54 +01:00
|
|
|
__entry->prev_pkt = prev_pkt;
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->acks_hard_ack = call->acks_hard_ack;
|
|
|
|
__entry->acks_prev_seq = call->acks_prev_seq;
|
2020-04-28 22:06:54 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
|
|
|
|
__entry->debug_id,
|
|
|
|
__entry->serial,
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->hard_ack,
|
|
|
|
__entry->acks_hard_ack,
|
2020-04-28 22:06:54 +01:00
|
|
|
__entry->prev_pkt,
|
2024-12-04 07:46:48 +00:00
|
|
|
__entry->acks_prev_seq)
|
2020-04-28 22:06:54 +01:00
|
|
|
);
|
|
|
|
|
2022-04-05 21:48:48 +01:00
|
|
|
TRACE_EVENT(rxrpc_req_ack,
|
|
|
|
TP_PROTO(unsigned int call_debug_id, rxrpc_seq_t seq,
|
|
|
|
enum rxrpc_req_ack_trace why),
|
|
|
|
|
|
|
|
TP_ARGS(call_debug_id, seq, why),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(enum rxrpc_req_ack_trace, why)
|
2022-04-05 21:48:48 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call_debug_id = call_debug_id;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->why = why;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x q=%08x REQ-%s",
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->seq,
|
|
|
|
__print_symbolic(__entry->why, rxrpc_req_ack_traces))
|
|
|
|
);
|
|
|
|
|
2022-04-05 21:16:32 +01:00
|
|
|
TRACE_EVENT(rxrpc_txbuf,
|
|
|
|
TP_PROTO(unsigned int debug_id,
|
|
|
|
unsigned int call_debug_id, rxrpc_seq_t seq,
|
|
|
|
int ref, enum rxrpc_txbuf_trace what),
|
|
|
|
|
|
|
|
TP_ARGS(debug_id, call_debug_id, seq, ref, what),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, debug_id)
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(enum rxrpc_txbuf_trace, what)
|
2022-04-05 21:16:32 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->debug_id = debug_id;
|
|
|
|
__entry->call_debug_id = call_debug_id;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->ref = ref;
|
|
|
|
__entry->what = what;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("B=%08x c=%08x q=%08x %s r=%d",
|
|
|
|
__entry->debug_id,
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->seq,
|
|
|
|
__print_symbolic(__entry->what, rxrpc_txbuf_traces),
|
|
|
|
__entry->ref)
|
|
|
|
);
|
|
|
|
|
2024-12-04 07:46:46 +00:00
|
|
|
TRACE_EVENT(rxrpc_tq,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
|
|
|
|
rxrpc_seq_t seq, enum rxrpc_tq_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq, seq, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(enum rxrpc_tq_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call_debug_id = call->debug_id;
|
|
|
|
__entry->qbase = tq ? tq->qbase : call->tx_qbase;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x bq=%08x q=%08x %s",
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->qbase,
|
|
|
|
__entry->seq,
|
|
|
|
__print_symbolic(__entry->trace, rxrpc_tq_traces))
|
|
|
|
);
|
|
|
|
|
2022-10-10 15:51:39 +01:00
|
|
|
TRACE_EVENT(rxrpc_poke_call,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, bool busy,
|
|
|
|
enum rxrpc_call_poke_trace what),
|
|
|
|
|
|
|
|
TP_ARGS(call, busy, what),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:35:16 +00:00
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(bool, busy)
|
|
|
|
__field(enum rxrpc_call_poke_trace, what)
|
2022-10-10 15:51:39 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call_debug_id = call->debug_id;
|
|
|
|
__entry->busy = busy;
|
|
|
|
__entry->what = what;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s%s",
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__print_symbolic(__entry->what, rxrpc_call_poke_traces),
|
|
|
|
__entry->busy ? "!" : "")
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_call_poked,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-01-31 08:39:46 +00:00
|
|
|
__field(unsigned int, call_debug_id)
|
2022-10-10 15:51:39 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call_debug_id = call->debug_id;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x",
|
|
|
|
__entry->call_debug_id)
|
|
|
|
);
|
|
|
|
|
2022-10-16 08:01:32 +01:00
|
|
|
TRACE_EVENT(rxrpc_sack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
|
|
|
|
unsigned int sack, enum rxrpc_sack_trace what),
|
|
|
|
|
|
|
|
TP_ARGS(call, seq, sack, what),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(unsigned int, sack)
|
|
|
|
__field(enum rxrpc_sack_trace, what)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call_debug_id = call->debug_id;
|
|
|
|
__entry->seq = seq;
|
|
|
|
__entry->sack = sack;
|
|
|
|
__entry->what = what;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x q=%08x %s k=%x",
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->seq,
|
|
|
|
__print_symbolic(__entry->what, rxrpc_sack_traces),
|
|
|
|
__entry->sack)
|
|
|
|
);
|
|
|
|
|
2024-12-04 07:46:37 +00:00
|
|
|
TRACE_EVENT(rxrpc_pmtud_tx,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, peer_debug_id)
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_serial_t, ping_serial)
|
|
|
|
__field(unsigned short, pmtud_trial)
|
|
|
|
__field(unsigned short, pmtud_good)
|
|
|
|
__field(unsigned short, pmtud_bad)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->peer_debug_id = call->peer->debug_id;
|
|
|
|
__entry->call_debug_id = call->debug_id;
|
|
|
|
__entry->ping_serial = call->conn->pmtud_probe;
|
|
|
|
__entry->pmtud_trial = call->peer->pmtud_trial;
|
|
|
|
__entry->pmtud_good = call->peer->pmtud_good;
|
|
|
|
__entry->pmtud_bad = call->peer->pmtud_bad;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("P=%08x c=%08x pr=%08x %u-%u-%u",
|
|
|
|
__entry->peer_debug_id,
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->ping_serial,
|
|
|
|
__entry->pmtud_good,
|
|
|
|
__entry->pmtud_trial,
|
|
|
|
__entry->pmtud_bad)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_pmtud_rx,
|
|
|
|
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
|
|
|
|
|
|
|
|
TP_ARGS(conn, resp_serial),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, peer_debug_id)
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_serial_t, ping_serial)
|
|
|
|
__field(rxrpc_serial_t, resp_serial)
|
|
|
|
__field(unsigned short, max_data)
|
|
|
|
__field(u8, jumbo_max)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->peer_debug_id = conn->peer->debug_id;
|
|
|
|
__entry->call_debug_id = conn->pmtud_call;
|
|
|
|
__entry->ping_serial = conn->pmtud_probe;
|
|
|
|
__entry->resp_serial = resp_serial;
|
|
|
|
__entry->max_data = conn->peer->max_data;
|
|
|
|
__entry->jumbo_max = conn->peer->pmtud_jumbo;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("P=%08x c=%08x pr=%08x rr=%08x max=%u jm=%u",
|
|
|
|
__entry->peer_debug_id,
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->ping_serial,
|
|
|
|
__entry->resp_serial,
|
|
|
|
__entry->max_data,
|
|
|
|
__entry->jumbo_max)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_pmtud_lost,
|
|
|
|
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
|
|
|
|
|
|
|
|
TP_ARGS(conn, resp_serial),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, peer_debug_id)
|
|
|
|
__field(unsigned int, call_debug_id)
|
|
|
|
__field(rxrpc_serial_t, ping_serial)
|
|
|
|
__field(rxrpc_serial_t, resp_serial)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->peer_debug_id = conn->peer->debug_id;
|
|
|
|
__entry->call_debug_id = conn->pmtud_call;
|
|
|
|
__entry->ping_serial = conn->pmtud_probe;
|
|
|
|
__entry->resp_serial = resp_serial;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("P=%08x c=%08x pr=%08x rr=%08x",
|
|
|
|
__entry->peer_debug_id,
|
|
|
|
__entry->call_debug_id,
|
|
|
|
__entry->ping_serial,
|
|
|
|
__entry->resp_serial)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_pmtud_reduce,
|
|
|
|
TP_PROTO(struct rxrpc_peer *peer, rxrpc_serial_t serial,
|
|
|
|
unsigned int max_data, enum rxrpc_pmtud_reduce_trace reason),
|
|
|
|
|
|
|
|
TP_ARGS(peer, serial, max_data, reason),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, peer_debug_id)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(unsigned int, max_data)
|
|
|
|
__field(enum rxrpc_pmtud_reduce_trace, reason)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->peer_debug_id = peer->debug_id;
|
|
|
|
__entry->serial = serial;
|
|
|
|
__entry->max_data = max_data;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("P=%08x %s r=%08x m=%u",
|
|
|
|
__entry->peer_debug_id,
|
|
|
|
__print_symbolic(__entry->reason, rxrpc_pmtud_reduce_traces),
|
|
|
|
__entry->serial, __entry->max_data)
|
|
|
|
);
|
|
|
|
|
2024-12-04 07:47:07 +00:00
|
|
|
TRACE_EVENT(rxrpc_rack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t timo),
|
|
|
|
|
|
|
|
TP_ARGS(call, timo),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, ack_serial)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(enum rxrpc_rack_timer_mode, mode)
|
|
|
|
__field(unsigned short, nr_sent)
|
|
|
|
__field(unsigned short, nr_lost)
|
|
|
|
__field(unsigned short, nr_resent)
|
|
|
|
__field(unsigned short, nr_sacked)
|
|
|
|
__field(ktime_t, timo)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->ack_serial = call->rx_serial;
|
|
|
|
__entry->seq = call->rack_end_seq;
|
|
|
|
__entry->mode = call->rack_timer_mode;
|
|
|
|
__entry->nr_sent = call->tx_nr_sent;
|
|
|
|
__entry->nr_lost = call->tx_nr_lost;
|
|
|
|
__entry->nr_resent = call->tx_nr_resent;
|
|
|
|
__entry->nr_sacked = call->acks_nr_sacks;
|
|
|
|
__entry->timo = timo;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x q=%08x %s slrs=%u,%u,%u,%u t=%lld",
|
|
|
|
__entry->call, __entry->ack_serial, __entry->seq,
|
|
|
|
__print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
|
|
|
|
__entry->nr_sent, __entry->nr_lost,
|
|
|
|
__entry->nr_resent, __entry->nr_sacked,
|
|
|
|
ktime_to_us(__entry->timo))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_update,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
|
|
|
|
|
|
|
|
TP_ARGS(call, summary),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, ack_serial)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(int, xmit_ts)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->ack_serial = call->rx_serial;
|
|
|
|
__entry->seq = call->rack_end_seq;
|
|
|
|
__entry->xmit_ts = ktime_sub(call->acks_latest_ts, call->rack_xmit_ts);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x q=%08x xt=%lld",
|
|
|
|
__entry->call, __entry->ack_serial, __entry->seq,
|
|
|
|
ktime_to_us(__entry->xmit_ts))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_scan_loss,
|
|
|
|
TP_PROTO(struct rxrpc_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(ktime_t, rack_rtt)
|
|
|
|
__field(ktime_t, rack_reo_wnd)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->rack_rtt = call->rack_rtt;
|
|
|
|
__entry->rack_reo_wnd = call->rack_reo_wnd;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x rtt=%lld reow=%lld",
|
|
|
|
__entry->call, ktime_to_us(__entry->rack_rtt),
|
|
|
|
ktime_to_us(__entry->rack_reo_wnd))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_scan_loss_tq,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq,
|
|
|
|
unsigned long nacks),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq, nacks),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(unsigned long, nacks)
|
|
|
|
__field(unsigned long, lost)
|
|
|
|
__field(unsigned long, retrans)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->qbase = tq->qbase;
|
|
|
|
__entry->nacks = nacks;
|
|
|
|
__entry->lost = tq->segment_lost;
|
|
|
|
__entry->retrans = tq->segment_retransmitted;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x q=%08x n=%lx l=%lx r=%lx",
|
|
|
|
__entry->call, __entry->qbase,
|
|
|
|
__entry->nacks, __entry->lost, __entry->retrans)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_detect_loss,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
|
|
|
|
rxrpc_seq_t seq),
|
|
|
|
|
|
|
|
TP_ARGS(call, summary, seq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, ack_serial)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->ack_serial = call->rx_serial;
|
|
|
|
__entry->seq = seq;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x q=%08x",
|
|
|
|
__entry->call, __entry->ack_serial, __entry->seq)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_mark_loss_tq,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq),
|
|
|
|
|
|
|
|
TP_ARGS(call, tq),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_seq_t, qbase)
|
|
|
|
__field(rxrpc_seq_t, trans)
|
|
|
|
__field(unsigned long, acked)
|
|
|
|
__field(unsigned long, lost)
|
|
|
|
__field(unsigned long, retrans)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->qbase = tq->qbase;
|
|
|
|
__entry->trans = call->tx_transmitted;
|
|
|
|
__entry->acked = tq->segment_acked;
|
|
|
|
__entry->lost = tq->segment_lost;
|
|
|
|
__entry->retrans = tq->segment_retransmitted;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x tq=%08x txq=%08x a=%lx l=%lx r=%lx",
|
|
|
|
__entry->call, __entry->qbase, __entry->trans,
|
|
|
|
__entry->acked, __entry->lost, __entry->retrans)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_tlp_probe,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, enum rxrpc_tlp_probe_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(call, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(rxrpc_seq_t, seq)
|
|
|
|
__field(enum rxrpc_tlp_probe_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->serial = call->tlp_serial;
|
|
|
|
__entry->seq = call->tlp_seq;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x pq=%08x %s",
|
|
|
|
__entry->call, __entry->serial, __entry->seq,
|
|
|
|
__print_symbolic(__entry->trace, rxrpc_tlp_probe_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_tlp_ack,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
|
|
|
|
enum rxrpc_tlp_ack_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(call, summary, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(rxrpc_serial_t, serial)
|
|
|
|
__field(rxrpc_seq_t, tlp_seq)
|
|
|
|
__field(rxrpc_seq_t, hard_ack)
|
|
|
|
__field(enum rxrpc_tlp_ack_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->serial = call->tlp_serial;
|
|
|
|
__entry->tlp_seq = call->tlp_seq;
|
|
|
|
__entry->hard_ack = call->acks_hard_ack;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%08x pq=%08x hq=%08x %s",
|
|
|
|
__entry->call, __entry->serial,
|
|
|
|
__entry->tlp_seq, __entry->hard_ack,
|
|
|
|
__print_symbolic(__entry->trace, rxrpc_tlp_ack_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(rxrpc_rack_timer,
|
|
|
|
TP_PROTO(struct rxrpc_call *call, ktime_t delay, bool exp),
|
|
|
|
|
|
|
|
TP_ARGS(call, delay, exp),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(bool, exp)
|
|
|
|
__field(enum rxrpc_rack_timer_mode, mode)
|
|
|
|
__field(ktime_t, delay)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->exp = exp;
|
|
|
|
__entry->mode = call->rack_timer_mode;
|
|
|
|
__entry->delay = delay;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %s %s to=%lld",
|
|
|
|
__entry->call,
|
|
|
|
__entry->exp ? "Exp" : "Set",
|
|
|
|
__print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
|
|
|
|
ktime_to_us(__entry->delay))
|
|
|
|
);
|
|
|
|
|
2025-04-11 10:52:54 +01:00
|
|
|
TRACE_EVENT(rxrpc_rxgk_rekey,
|
|
|
|
TP_PROTO(struct rxrpc_connection *conn,
|
|
|
|
unsigned int current_key, unsigned int requested_key),
|
|
|
|
|
|
|
|
TP_ARGS(conn, current_key, requested_key),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, conn)
|
|
|
|
__field(unsigned int, current_key)
|
|
|
|
__field(unsigned int, requested_key)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->conn = conn->debug_id;
|
|
|
|
__entry->current_key = current_key;
|
|
|
|
__entry->requested_key = requested_key;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("C=%08x cur=%x req=%x",
|
|
|
|
__entry->conn,
|
|
|
|
__entry->current_key,
|
|
|
|
__entry->requested_key)
|
|
|
|
);
|
|
|
|
|
2022-05-21 08:45:35 +01:00
|
|
|
#undef EM
|
|
|
|
#undef E_
|
2022-10-06 21:45:42 +01:00
|
|
|
|
|
|
|
#endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */
|
2016-08-23 15:27:24 +01:00
|
|
|
#endif /* _TRACE_RXRPC_H */
|
|
|
|
|
|
|
|
/* This part must be outside protection */
|
|
|
|
#include <trace/define_trace.h>
|