/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AFS tracepoints
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM afs

/*
 * TRACE_HEADER_MULTI_READ allows this header to be re-read by the
 * tracepoint machinery, so the guard deliberately admits a second pass.
 */
#if !defined(_TRACE_AFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_AFS_H

#include <linux/tracepoint.h>

/*
 * Define enums for tracing information.
 */
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
2017-11-02 15:27:51 +00:00
|
|
|
/*
 * File server operation IDs as used on the wire by the AFS and YFS
 * protocols; recorded in tracepoints to identify the RPC being made.
 */
enum afs_fs_operation {
	afs_FS_FetchData	= 130,	/* AFS Fetch file data */
	afs_FS_FetchACL		= 131,	/* AFS Fetch file ACL */
	afs_FS_FetchStatus	= 132,	/* AFS Fetch file status */
	afs_FS_StoreData	= 133,	/* AFS Store file data */
	afs_FS_StoreACL		= 134,	/* AFS Store file ACL */
	afs_FS_StoreStatus	= 135,	/* AFS Store file status */
	afs_FS_RemoveFile	= 136,	/* AFS Remove a file */
	afs_FS_CreateFile	= 137,	/* AFS Create a file */
	afs_FS_Rename		= 138,	/* AFS Rename or move a file or directory */
	afs_FS_Symlink		= 139,	/* AFS Create a symbolic link */
	afs_FS_Link		= 140,	/* AFS Create a hard link */
	afs_FS_MakeDir		= 141,	/* AFS Create a directory */
	afs_FS_RemoveDir	= 142,	/* AFS Remove a directory */
	afs_FS_GetVolumeInfo	= 148,	/* AFS Get information about a volume */
	afs_FS_GetVolumeStatus	= 149,	/* AFS Get volume status information */
	afs_FS_GetRootVolume	= 151,	/* AFS Get root volume name */
	afs_FS_SetLock		= 156,	/* AFS Request a file lock */
	afs_FS_ExtendLock	= 157,	/* AFS Extend a file lock */
	afs_FS_ReleaseLock	= 158,	/* AFS Release a file lock */
	afs_FS_Lookup		= 161,	/* AFS lookup file in directory */
	afs_FS_InlineBulkStatus	= 65536, /* AFS Fetch multiple file statuses with errors */
	afs_FS_FetchData64	= 65537, /* AFS Fetch file data */
	afs_FS_StoreData64	= 65538, /* AFS Store file data */
	afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */
	afs_FS_GetCapabilities	= 65540, /* AFS Get FS server capabilities */

	/* YFS service variants; yfs_FS_FetchData intentionally shares the
	 * AFS value 130 (duplicate enum values are permitted in C). */
	yfs_FS_FetchData	= 130,	 /* YFS Fetch file data */
	yfs_FS_FetchACL		= 64131, /* YFS Fetch file ACL */
	yfs_FS_FetchStatus	= 64132, /* YFS Fetch file status */
	yfs_FS_StoreACL		= 64134, /* YFS Store file ACL */
	yfs_FS_StoreStatus	= 64135, /* YFS Store file status */
	yfs_FS_RemoveFile	= 64136, /* YFS Remove a file */
	yfs_FS_CreateFile	= 64137, /* YFS Create a file */
	yfs_FS_Rename		= 64138, /* YFS Rename or move a file or directory */
	yfs_FS_Symlink		= 64139, /* YFS Create a symbolic link */
	yfs_FS_Link		= 64140, /* YFS Create a hard link */
	yfs_FS_MakeDir		= 64141, /* YFS Create a directory */
	yfs_FS_RemoveDir	= 64142, /* YFS Remove a directory */
	yfs_FS_GetVolumeStatus	= 64149, /* YFS Get volume status information */
	yfs_FS_SetVolumeStatus	= 64150, /* YFS Set volume status information */
	yfs_FS_SetLock		= 64156, /* YFS Request a file lock */
	yfs_FS_ExtendLock	= 64157, /* YFS Extend a file lock */
	yfs_FS_ReleaseLock	= 64158, /* YFS Release a file lock */
	yfs_FS_Lookup		= 64161, /* YFS lookup file in directory */
	yfs_FS_FlushCPS		= 64165,
	yfs_FS_FetchOpaqueACL	= 64168,
	yfs_FS_WhoAmI		= 64170,
	yfs_FS_RemoveACL	= 64171,
	yfs_FS_RemoveFile2	= 64173,
	yfs_FS_StoreOpaqueACL2	= 64174,
	yfs_FS_InlineBulkStatus	= 64536, /* YFS Fetch multiple file statuses with errors */
	yfs_FS_FetchData64	= 64537, /* YFS Fetch file data */
	yfs_FS_StoreData64	= 64538, /* YFS Store file data */
	yfs_FS_UpdateSymlink	= 64540,
};
|
|
|
/*
 * Volume location (VL) server operation IDs, as used on the wire;
 * recorded in tracepoints to identify the RPC being made.
 */
enum afs_vl_operation {
	afs_VL_GetEntryByNameU	= 527,	 /* AFS Get Vol Entry By Name operation ID */
	afs_VL_GetAddrsU	= 533,	 /* AFS Get FS server addresses */
	afs_YFSVL_GetEndpoints	= 64002, /* YFS Get FS & Vol server addresses */
	afs_YFSVL_GetCellName	= 64014, /* YFS Get actual cell name */
	afs_VL_GetCapabilities	= 65537, /* AFS Get VL server capabilities */
};
|
2021-06-15 11:57:26 +01:00
|
|
|
/*
 * AFS cache manager (CM) operation IDs — RPCs made *to* this client by
 * a fileserver; recorded in tracepoints on incoming calls.
 */
enum afs_cm_operation {
	afs_CB_CallBack			= 204, /* AFS break callback promises */
	afs_CB_InitCallBackState	= 205, /* AFS initialise callback state */
	afs_CB_Probe			= 206, /* AFS probe client */
	afs_CB_GetLock			= 207, /* AFS get contents of CM lock table */
	afs_CB_GetCE			= 208, /* AFS get cache file description */
	afs_CB_GetXStatsVersion		= 209, /* AFS get version of extended statistics */
	afs_CB_GetXStats		= 210, /* AFS get contents of extended statistics data */
	afs_CB_InitCallBackState3	= 213, /* AFS initialise callback state, version 3 */
	afs_CB_ProbeUuid		= 214, /* AFS check the client hasn't rebooted */
};
|
|
|
|
/*
 * YFS cache manager (CM) operation IDs — YFS-service counterparts of
 * enum afs_cm_operation; recorded in tracepoints on incoming calls.
 */
enum yfs_cm_operation {
	yfs_CB_Probe			= 206, /* YFS probe client */
	yfs_CB_GetLock			= 207, /* YFS get contents of CM lock table */
	yfs_CB_XStatsVersion		= 209, /* YFS get version of extended statistics */
	yfs_CB_GetXStats		= 210, /* YFS get contents of extended statistics data */
	yfs_CB_InitCallBackState3	= 213, /* YFS initialise callback state, version 3 */
	yfs_CB_ProbeUuid		= 214, /* YFS check the client hasn't rebooted */
	yfs_CB_GetServerPrefs		= 215,
	yfs_CB_GetCellServDV		= 216,
	yfs_CB_GetLocalCell		= 217,
	yfs_CB_GetCacheConfig		= 218,
	yfs_CB_GetCellByNum		= 65537,
	yfs_CB_TellMeAboutYourself	= 65538, /* get client capabilities */
	yfs_CB_CallBack			= 64204,
};
|
2017-01-05 10:38:36 +00:00
|
|
|
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
 * Declare tracing information enums and their string mappings for display.
 */
/* Reference-tracing events for struct afs_call; EM/E_ expand to either
 * enum entries or enum->string mappings depending on context. */
#define afs_call_traces \
	EM(afs_call_trace_alloc,	"ALLOC") \
	EM(afs_call_trace_async_abort,	"ASYAB") \
	EM(afs_call_trace_async_kill,	"ASYKL") \
	EM(afs_call_trace_free,		"FREE ") \
	EM(afs_call_trace_get,		"GET ") \
	EM(afs_call_trace_put,		"PUT ") \
	EM(afs_call_trace_wake,		"WAKE ") \
	E_(afs_call_trace_work,		"QUEUE")
|
2019-06-20 18:12:17 +01:00
|
|
|
/* Reference- and lifecycle-tracing events for struct afs_server. */
#define afs_server_traces \
	EM(afs_server_trace_callback,		"CALLBACK ") \
	EM(afs_server_trace_destroy,		"DESTROY ") \
	EM(afs_server_trace_free,		"FREE ") \
	EM(afs_server_trace_gc,			"GC ") \
	EM(afs_server_trace_get_probe,		"GET probe") \
	EM(afs_server_trace_purging,		"PURGE ") \
	EM(afs_server_trace_put_cbi,		"PUT cbi ") \
	EM(afs_server_trace_put_probe,		"PUT probe") \
	EM(afs_server_trace_see_destroyer,	"SEE destr") \
	EM(afs_server_trace_see_expired,	"SEE expd ") \
	EM(afs_server_trace_see_purge,		"SEE purge") \
	EM(afs_server_trace_see_timer,		"SEE timer") \
	EM(afs_server_trace_unuse_call,		"UNU call ") \
	EM(afs_server_trace_unuse_create_fail,	"UNU cfail") \
	EM(afs_server_trace_unuse_slist,	"UNU slist") \
	EM(afs_server_trace_unuse_slist_isort,	"UNU isort") \
	EM(afs_server_trace_update,		"UPDATE ") \
	EM(afs_server_trace_use_by_uuid,	"USE uuid ") \
	EM(afs_server_trace_use_cm_call,	"USE cm-cl") \
	EM(afs_server_trace_use_get_caps,	"USE gcaps") \
	EM(afs_server_trace_use_give_up_cb,	"USE gvupc") \
	EM(afs_server_trace_use_install,	"USE inst ") \
	E_(afs_server_trace_wait_create,	"WAIT crt ")
|
2020-04-29 17:02:04 +01:00
|
|
|
/* Reference-tracing events for struct afs_volume. */
#define afs_volume_traces \
	EM(afs_volume_trace_alloc,		"ALLOC ") \
	EM(afs_volume_trace_free,		"FREE ") \
	EM(afs_volume_trace_get_alloc_sbi,	"GET sbi-alloc ") \
	EM(afs_volume_trace_get_callback,	"GET callback ") \
	EM(afs_volume_trace_get_cell_insert,	"GET cell-insrt") \
	EM(afs_volume_trace_get_new_op,		"GET op-new ") \
	EM(afs_volume_trace_get_query_alias,	"GET cell-alias") \
	EM(afs_volume_trace_put_callback,	"PUT callback ") \
	EM(afs_volume_trace_put_cell_dup,	"PUT cell-dup ") \
	EM(afs_volume_trace_put_cell_root,	"PUT cell-root ") \
	EM(afs_volume_trace_put_destroy_sbi,	"PUT sbi-destry") \
	EM(afs_volume_trace_put_free_fc,	"PUT fc-free ") \
	EM(afs_volume_trace_put_put_op,		"PUT op-put ") \
	EM(afs_volume_trace_put_query_alias,	"PUT cell-alias") \
	EM(afs_volume_trace_put_validate_fc,	"PUT fc-validat") \
	E_(afs_volume_trace_remove,		"REMOVE ")
|
2020-10-13 20:51:59 +01:00
|
|
|
#define afs_cell_traces \
|
|
|
|
EM(afs_cell_trace_alloc, "ALLOC ") \
|
afs: Simplify cell record handling
Simplify afs_cell record handling to avoid very occasional races that cause
module removal to hang (it waits for all cell records to be removed).
There are two things that particularly contribute to the difficulty:
firstly, the code tries to pass a ref on the cell to the cell's maintenance
work item (which gets awkward if the work item is already queued); and,
secondly, there's an overall cell manager that tries to use just one timer
for the entire cell collection (to avoid having loads of timers). However,
both of these are probably unnecessarily restrictive.
To simplify this, the following changes are made:
(1) The cell record collection manager is removed. Each cell record
manages itself individually.
(2) Each afs_cell is given a second work item (cell->destroyer) that is
queued when its refcount reaches zero. This is not done in the
context of the putting thread as it might be in an inconvenient place
to sleep.
(3) Each afs_cell is given its own timer. The timer is used to expire the
cell record after a period of unuse if not otherwise pinned and can
also be used for other maintenance tasks if necessary (of which there
are currently none as DNS refresh is triggered by filesystem
operations).
(4) The afs_cell manager work item (cell->manager) is no longer given a
ref on the cell when queued; rather, the manager must be deleted.
This does away with the need to deal with the consequences of losing a
race to queue cell->manager. Clean up of extra queuing is deferred to
the destroyer.
(5) The cell destroyer work item makes sure the cell timer is removed and
that the normal cell work is cancelled before farming the actual
destruction off to RCU.
(6) When a network namespace is destroyed or the kafs module is unloaded,
it's now a simple matter of marking the namespace as dead then just
waking up all the cell work items. They will then remove and destroy
themselves once all remaining activity counts and/or a ref counts are
dropped. This makes sure that all server records are dropped first.
(7) The cell record state set is reduced to just four states: SETTING_UP,
ACTIVE, REMOVING and DEAD. The record persists in the active state
even when it's not being used until the time comes to remove it rather
than downgrading it to an inactive state from whence it can be
restored.
This means that the cell still appears in /proc and /afs when not in
use until it switches to the REMOVING state - at which point it is
removed.
Note that the REMOVING state is included so that someone wanting to
resurrect the cell record is forced to wait whilst the cell is torn
down in that state. Once it's in the DEAD state, it has been removed
from net->cells tree and is no longer findable and can be replaced.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-16-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-12-dhowells@redhat.com/ # v4
2025-02-24 16:06:03 +00:00
|
|
|
EM(afs_cell_trace_destroy, "DESTROY ") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_free, "FREE ") \
|
2025-01-07 18:34:51 +00:00
|
|
|
EM(afs_cell_trace_get_atcell, "GET atcell") \
|
2025-02-18 19:22:48 +00:00
|
|
|
EM(afs_cell_trace_get_server, "GET server") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_get_vol, "GET vol ") \
|
afs: Simplify cell record handling
Simplify afs_cell record handling to avoid very occasional races that cause
module removal to hang (it waits for all cell records to be removed).
There are two things that particularly contribute to the difficulty:
firstly, the code tries to pass a ref on the cell to the cell's maintenance
work item (which gets awkward if the work item is already queued); and,
secondly, there's an overall cell manager that tries to use just one timer
for the entire cell collection (to avoid having loads of timers). However,
both of these are probably unnecessarily restrictive.
To simplify this, the following changes are made:
(1) The cell record collection manager is removed. Each cell record
manages itself individually.
(2) Each afs_cell is given a second work item (cell->destroyer) that is
queued when its refcount reaches zero. This is not done in the
context of the putting thread as it might be in an inconvenient place
to sleep.
(3) Each afs_cell is given its own timer. The timer is used to expire the
cell record after a period of unuse if not otherwise pinned and can
also be used for other maintenance tasks if necessary (of which there
are currently none as DNS refresh is triggered by filesystem
operations).
(4) The afs_cell manager work item (cell->manager) is no longer given a
ref on the cell when queued; rather, the manager must be deleted.
This does away with the need to deal with the consequences of losing a
race to queue cell->manager. Clean up of extra queuing is deferred to
the destroyer.
(5) The cell destroyer work item makes sure the cell timer is removed and
that the normal cell work is cancelled before farming the actual
destruction off to RCU.
(6) When a network namespace is destroyed or the kafs module is unloaded,
it's now a simple matter of marking the namespace as dead then just
waking up all the cell work items. They will then remove and destroy
themselves once all remaining activity counts and/or a ref counts are
dropped. This makes sure that all server records are dropped first.
(7) The cell record state set is reduced to just four states: SETTING_UP,
ACTIVE, REMOVING and DEAD. The record persists in the active state
even when it's not being used until the time comes to remove it rather
than downgrading it to an inactive state from whence it can be
restored.
This means that the cell still appears in /proc and /afs when not in
use until it switches to the REMOVING state - at which point it is
removed.
Note that the REMOVING state is included so that someone wanting to
resurrect the cell record is forced to wait whilst the cell is torn
down in that state. Once it's in the DEAD state, it has been removed
from net->cells tree and is no longer findable and can be replaced.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-16-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-12-dhowells@redhat.com/ # v4
2025-02-24 16:06:03 +00:00
|
|
|
EM(afs_cell_trace_purge, "PURGE ") \
|
2025-01-07 18:34:51 +00:00
|
|
|
EM(afs_cell_trace_put_atcell, "PUT atcell") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_put_candidate, "PUT candid") \
|
afs: Simplify cell record handling
Simplify afs_cell record handling to avoid very occasional races that cause
module removal to hang (it waits for all cell records to be removed).
There are two things that particularly contribute to the difficulty:
firstly, the code tries to pass a ref on the cell to the cell's maintenance
work item (which gets awkward if the work item is already queued); and,
secondly, there's an overall cell manager that tries to use just one timer
for the entire cell collection (to avoid having loads of timers). However,
both of these are probably unnecessarily restrictive.
To simplify this, the following changes are made:
(1) The cell record collection manager is removed. Each cell record
manages itself individually.
(2) Each afs_cell is given a second work item (cell->destroyer) that is
queued when its refcount reaches zero. This is not done in the
context of the putting thread as it might be in an inconvenient place
to sleep.
(3) Each afs_cell is given its own timer. The timer is used to expire the
cell record after a period of unuse if not otherwise pinned and can
also be used for other maintenance tasks if necessary (of which there
are currently none as DNS refresh is triggered by filesystem
operations).
(4) The afs_cell manager work item (cell->manager) is no longer given a
ref on the cell when queued; rather, the manager must be deleted.
This does away with the need to deal with the consequences of losing a
race to queue cell->manager. Clean up of extra queuing is deferred to
the destroyer.
(5) The cell destroyer work item makes sure the cell timer is removed and
that the normal cell work is cancelled before farming the actual
destruction off to RCU.
(6) When a network namespace is destroyed or the kafs module is unloaded,
it's now a simple matter of marking the namespace as dead then just
waking up all the cell work items. They will then remove and destroy
themselves once all remaining activity counts and/or a ref counts are
dropped. This makes sure that all server records are dropped first.
(7) The cell record state set is reduced to just four states: SETTING_UP,
ACTIVE, REMOVING and DEAD. The record persists in the active state
even when it's not being used until the time comes to remove it rather
than downgrading it to an inactive state from whence it can be
restored.
This means that the cell still appears in /proc and /afs when not in
use until it switches to the REMOVING state - at which point it is
removed.
Note that the REMOVING state is included so that someone wanting to
resurrect the cell record is forced to wait whilst the cell is torn
down in that state. Once it's in the DEAD state, it has been removed
from net->cells tree and is no longer findable and can be replaced.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-16-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-12-dhowells@redhat.com/ # v4
2025-02-24 16:06:03 +00:00
|
|
|
EM(afs_cell_trace_put_final, "PUT final ") \
|
2025-02-18 19:22:48 +00:00
|
|
|
EM(afs_cell_trace_put_server, "PUT server") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_put_vol, "PUT vol ") \
|
afs: Simplify cell record handling
Simplify afs_cell record handling to avoid very occasional races that cause
module removal to hang (it waits for all cell records to be removed).
There are two things that particularly contribute to the difficulty:
firstly, the code tries to pass a ref on the cell to the cell's maintenance
work item (which gets awkward if the work item is already queued); and,
secondly, there's an overall cell manager that tries to use just one timer
for the entire cell collection (to avoid having loads of timers). However,
both of these are probably unnecessarily restrictive.
To simplify this, the following changes are made:
(1) The cell record collection manager is removed. Each cell record
manages itself individually.
(2) Each afs_cell is given a second work item (cell->destroyer) that is
queued when its refcount reaches zero. This is not done in the
context of the putting thread as it might be in an inconvenient place
to sleep.
(3) Each afs_cell is given its own timer. The timer is used to expire the
cell record after a period of unuse if not otherwise pinned and can
also be used for other maintenance tasks if necessary (of which there
are currently none as DNS refresh is triggered by filesystem
operations).
(4) The afs_cell manager work item (cell->manager) is no longer given a
ref on the cell when queued; rather, the manager must be deleted.
This does away with the need to deal with the consequences of losing a
race to queue cell->manager. Clean up of extra queuing is deferred to
the destroyer.
(5) The cell destroyer work item makes sure the cell timer is removed and
that the normal cell work is cancelled before farming the actual
destruction off to RCU.
(6) When a network namespace is destroyed or the kafs module is unloaded,
it's now a simple matter of marking the namespace as dead then just
waking up all the cell work items. They will then remove and destroy
themselves once all remaining activity counts and/or a ref counts are
dropped. This makes sure that all server records are dropped first.
(7) The cell record state set is reduced to just four states: SETTING_UP,
ACTIVE, REMOVING and DEAD. The record persists in the active state
even when it's not being used until the time comes to remove it rather
than downgrading it to an inactive state from whence it can be
restored.
This means that the cell still appears in /proc and /afs when not in
use until it switches to the REMOVING state - at which point it is
removed.
Note that the REMOVING state is included so that someone wanting to
resurrect the cell record is forced to wait whilst the cell is torn
down in that state. Once it's in the DEAD state, it has been removed
from net->cells tree and is no longer findable and can be replaced.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-16-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-12-dhowells@redhat.com/ # v4
2025-02-24 16:06:03 +00:00
|
|
|
EM(afs_cell_trace_queue_again, "QUE again ") \
|
|
|
|
EM(afs_cell_trace_queue_dns, "QUE dns ") \
|
|
|
|
EM(afs_cell_trace_queue_new, "QUE new ") \
|
|
|
|
EM(afs_cell_trace_queue_purge, "QUE purge ") \
|
|
|
|
EM(afs_cell_trace_manage, "MANAGE ") \
|
|
|
|
EM(afs_cell_trace_managed, "MANAGED ") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_see_source, "SEE source") \
|
afs: Simplify cell record handling
Simplify afs_cell record handling to avoid very occasional races that cause
module removal to hang (it waits for all cell records to be removed).
There are two things that particularly contribute to the difficulty:
firstly, the code tries to pass a ref on the cell to the cell's maintenance
work item (which gets awkward if the work item is already queued); and,
secondly, there's an overall cell manager that tries to use just one timer
for the entire cell collection (to avoid having loads of timers). However,
both of these are probably unnecessarily restrictive.
To simplify this, the following changes are made:
(1) The cell record collection manager is removed. Each cell record
manages itself individually.
(2) Each afs_cell is given a second work item (cell->destroyer) that is
queued when its refcount reaches zero. This is not done in the
context of the putting thread as it might be in an inconvenient place
to sleep.
(3) Each afs_cell is given its own timer. The timer is used to expire the
cell record after a period of unuse if not otherwise pinned and can
also be used for other maintenance tasks if necessary (of which there
are currently none as DNS refresh is triggered by filesystem
operations).
(4) The afs_cell manager work item (cell->manager) is no longer given a
ref on the cell when queued; rather, the manager must be deleted.
This does away with the need to deal with the consequences of losing a
race to queue cell->manager. Clean up of extra queuing is deferred to
the destroyer.
(5) The cell destroyer work item makes sure the cell timer is removed and
that the normal cell work is cancelled before farming the actual
destruction off to RCU.
(6) When a network namespace is destroyed or the kafs module is unloaded,
it's now a simple matter of marking the namespace as dead then just
waking up all the cell work items. They will then remove and destroy
themselves once all remaining activity counts and/or a ref counts are
dropped. This makes sure that all server records are dropped first.
(7) The cell record state set is reduced to just four states: SETTING_UP,
ACTIVE, REMOVING and DEAD. The record persists in the active state
even when it's not being used until the time comes to remove it rather
than downgrading it to an inactive state from whence it can be
restored.
This means that the cell still appears in /proc and /afs when not in
use until it switches to the REMOVING state - at which point it is
removed.
Note that the REMOVING state is included so that someone wanting to
resurrect the cell record is forced to wait whilst the cell is torn
down in that state. Once it's in the DEAD state, it has been removed
from net->cells tree and is no longer findable and can be replaced.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-16-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-12-dhowells@redhat.com/ # v4
2025-02-24 16:06:03 +00:00
|
|
|
EM(afs_cell_trace_see_mgmt_timer, "SEE mtimer") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_unuse_alias, "UNU alias ") \
|
|
|
|
EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
|
|
|
|
EM(afs_cell_trace_unuse_delete, "UNU delete") \
|
2025-02-24 09:52:58 +00:00
|
|
|
EM(afs_cell_trace_unuse_dynroot_mntpt, "UNU dyn-mp") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
|
2025-02-24 09:52:58 +00:00
|
|
|
EM(afs_cell_trace_unuse_lookup_dynroot, "UNU lu-dyn") \
|
2025-02-24 10:54:04 +00:00
|
|
|
EM(afs_cell_trace_unuse_lookup_error, "UNU lu-err") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
|
2023-02-23 15:24:24 +00:00
|
|
|
EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
|
|
|
|
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
|
|
|
|
EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
|
|
|
|
EM(afs_cell_trace_unuse_ws, "UNU ws ") \
|
|
|
|
EM(afs_cell_trace_use_alias, "USE alias ") \
|
|
|
|
EM(afs_cell_trace_use_check_alias, "USE chk-al") \
|
|
|
|
EM(afs_cell_trace_use_fc, "USE fc ") \
|
|
|
|
EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
|
2025-02-24 10:37:56 +00:00
|
|
|
EM(afs_cell_trace_use_lookup_add, "USE lu-add") \
|
|
|
|
EM(afs_cell_trace_use_lookup_canonical, "USE lu-can") \
|
|
|
|
EM(afs_cell_trace_use_lookup_dynroot, "USE lu-dyn") \
|
|
|
|
EM(afs_cell_trace_use_lookup_mntpt, "USE lu-mpt") \
|
|
|
|
EM(afs_cell_trace_use_lookup_mount, "USE lu-mnt") \
|
|
|
|
EM(afs_cell_trace_use_lookup_ws, "USE lu-ws ") \
|
2020-10-13 20:51:59 +01:00
|
|
|
EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
|
|
|
|
EM(afs_cell_trace_use_pin, "USE pin ") \
|
|
|
|
EM(afs_cell_trace_use_probe, "USE probe ") \
|
|
|
|
EM(afs_cell_trace_use_sbi, "USE sbi ") \
|
|
|
|
E_(afs_cell_trace_wait, "WAIT ")
|
|
|
|
|
2023-10-19 13:59:03 +01:00
|
|
|
#define afs_alist_traces \
|
|
|
|
EM(afs_alist_trace_alloc, "ALLOC ") \
|
2023-10-31 16:30:37 +00:00
|
|
|
EM(afs_alist_trace_get_estate, "GET estate") \
|
2023-10-20 16:13:03 +01:00
|
|
|
EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
|
|
|
|
EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
|
2023-10-19 13:59:03 +01:00
|
|
|
EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
|
2023-10-31 16:30:37 +00:00
|
|
|
EM(afs_alist_trace_put_estate, "PUT estate") \
|
2023-10-19 13:59:03 +01:00
|
|
|
EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
|
|
|
|
EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
|
|
|
|
EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
|
afs: Fix afs_server ref accounting
The current way that afs_server refs are accounted and cleaned up sometimes
cause rmmod to hang when it is waiting for cell records to be removed. The
problem is that the cell cleanup might occasionally happen before the
server cleanup and then there's nothing that causes the cell to
garbage-collect the remaining servers as they become inactive.
Partially fix this by:
(1) Give each afs_server record its own management timer that rather than
relying on the cell manager's central timer to drive each individual
cell's maintenance work item to garbage collect servers.
This timer is set when afs_unuse_server() reduces a server's activity
count to zero and will schedule the server's destroyer work item upon
firing.
(2) Give each afs_server record its own destroyer work item that removes
the record from the cell's database, shuts down the timer, cancels any
pending work for itself, sends an RPC to the server to cancel
outstanding callbacks.
This change, in combination with the timer, obviates the need to try
and coordinate so closely between the cell record and a bunch of other
server records to try and tear everything down in a coordinated
fashion. With this, the cell record is pinned until the server RCU is
complete and namespace/module removal will wait until all the cell
records are removed.
(3) Now that incoming calls are mapped to servers (and thus cells) using
data attached to an rxrpc_peer, the UUID-to-server mapping tree is
moved from the namespace to the cell (cell->fs_servers). This means
there can no longer be duplicates therein - and that allows the
mapping tree to be simpler as there doesn't need to be a chain of
same-UUID servers that are in different cells.
(4) The lock protecting the UUID mapping tree is switched to an
rw_semaphore on the cell rather than a seqlock on the namespace as
it's now only used during mounting in contexts in which we're allowed
to sleep.
(5) When it comes time for a cell that is being removed to purge its set
of servers, it just needs to iterate over them and wake them up. Once
a server becomes inactive, its destroyer work item will observe the
state of the cell and immediately remove that record.
(6) When a server record is removed, it is marked AFS_SERVER_FL_EXPIRED to
prevent reattempts at removal. The record will be dispatched to RCU
for destruction once its refcount reaches 0.
(7) The AFS_SERVER_FL_UNCREATED/CREATING flags are used to synchronise
simultaneous creation attempts. If one attempt fails, it will abandon
the attempt and allow another to try again.
Note that the record can't just be abandoned when dead as it's bound
into a server list attached to a volume and only subject to
replacement if the server list obtained for the volume from the VLDB
changes.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20250224234154.2014840-15-dhowells@redhat.com/ # v1
Link: https://lore.kernel.org/r/20250310094206.801057-11-dhowells@redhat.com/ # v4
2025-02-24 16:51:36 +00:00
|
|
|
EM(afs_alist_trace_put_server_create, "PUT sv-crt") \
|
2023-10-19 13:59:03 +01:00
|
|
|
EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
|
|
|
|
EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
|
2023-10-20 16:13:03 +01:00
|
|
|
EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
|
|
|
|
EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
|
|
|
|
EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
|
|
|
|
EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
|
|
|
|
EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
|
|
|
|
EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
|
2023-10-19 13:59:03 +01:00
|
|
|
EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
|
|
|
|
EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
|
|
|
|
E_(afs_alist_trace_free, "FREE ")
|
|
|
|
|
2023-10-31 16:30:37 +00:00
|
|
|
#define afs_estate_traces \
|
|
|
|
EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
|
|
|
|
EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
|
2023-10-18 09:24:01 +01:00
|
|
|
EM(afs_estate_trace_get_server_state, "GET srv-st") \
|
2023-10-31 16:30:37 +00:00
|
|
|
EM(afs_estate_trace_get_getcaps, "GET getcap") \
|
|
|
|
EM(afs_estate_trace_put_getcaps, "PUT getcap") \
|
|
|
|
EM(afs_estate_trace_put_probe, "PUT probe ") \
|
|
|
|
EM(afs_estate_trace_put_server, "PUT server") \
|
2023-10-18 09:24:01 +01:00
|
|
|
EM(afs_estate_trace_put_server_state, "PUT srv-st") \
|
2023-10-31 16:30:37 +00:00
|
|
|
E_(afs_estate_trace_free, "FREE ")
|
|
|
|
|
2017-11-02 15:27:51 +00:00
|
|
|
#define afs_fs_operations \
|
|
|
|
EM(afs_FS_FetchData, "FS.FetchData") \
|
|
|
|
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
|
|
|
|
EM(afs_FS_StoreData, "FS.StoreData") \
|
|
|
|
EM(afs_FS_StoreStatus, "FS.StoreStatus") \
|
|
|
|
EM(afs_FS_RemoveFile, "FS.RemoveFile") \
|
|
|
|
EM(afs_FS_CreateFile, "FS.CreateFile") \
|
|
|
|
EM(afs_FS_Rename, "FS.Rename") \
|
|
|
|
EM(afs_FS_Symlink, "FS.Symlink") \
|
|
|
|
EM(afs_FS_Link, "FS.Link") \
|
|
|
|
EM(afs_FS_MakeDir, "FS.MakeDir") \
|
|
|
|
EM(afs_FS_RemoveDir, "FS.RemoveDir") \
|
|
|
|
EM(afs_FS_GetVolumeInfo, "FS.GetVolumeInfo") \
|
|
|
|
EM(afs_FS_GetVolumeStatus, "FS.GetVolumeStatus") \
|
|
|
|
EM(afs_FS_GetRootVolume, "FS.GetRootVolume") \
|
|
|
|
EM(afs_FS_SetLock, "FS.SetLock") \
|
|
|
|
EM(afs_FS_ExtendLock, "FS.ExtendLock") \
|
|
|
|
EM(afs_FS_ReleaseLock, "FS.ReleaseLock") \
|
|
|
|
EM(afs_FS_Lookup, "FS.Lookup") \
|
2018-04-09 21:12:31 +01:00
|
|
|
EM(afs_FS_InlineBulkStatus, "FS.InlineBulkStatus") \
|
2017-11-02 15:27:51 +00:00
|
|
|
EM(afs_FS_FetchData64, "FS.FetchData64") \
|
|
|
|
EM(afs_FS_StoreData64, "FS.StoreData64") \
|
|
|
|
EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \
|
2018-10-20 00:57:58 +01:00
|
|
|
EM(afs_FS_GetCapabilities, "FS.GetCapabilities") \
|
|
|
|
EM(yfs_FS_FetchACL, "YFS.FetchACL") \
|
|
|
|
EM(yfs_FS_FetchStatus, "YFS.FetchStatus") \
|
|
|
|
EM(yfs_FS_StoreACL, "YFS.StoreACL") \
|
|
|
|
EM(yfs_FS_StoreStatus, "YFS.StoreStatus") \
|
|
|
|
EM(yfs_FS_RemoveFile, "YFS.RemoveFile") \
|
|
|
|
EM(yfs_FS_CreateFile, "YFS.CreateFile") \
|
|
|
|
EM(yfs_FS_Rename, "YFS.Rename") \
|
|
|
|
EM(yfs_FS_Symlink, "YFS.Symlink") \
|
|
|
|
EM(yfs_FS_Link, "YFS.Link") \
|
|
|
|
EM(yfs_FS_MakeDir, "YFS.MakeDir") \
|
|
|
|
EM(yfs_FS_RemoveDir, "YFS.RemoveDir") \
|
|
|
|
EM(yfs_FS_GetVolumeStatus, "YFS.GetVolumeStatus") \
|
|
|
|
EM(yfs_FS_SetVolumeStatus, "YFS.SetVolumeStatus") \
|
|
|
|
EM(yfs_FS_SetLock, "YFS.SetLock") \
|
|
|
|
EM(yfs_FS_ExtendLock, "YFS.ExtendLock") \
|
|
|
|
EM(yfs_FS_ReleaseLock, "YFS.ReleaseLock") \
|
|
|
|
EM(yfs_FS_Lookup, "YFS.Lookup") \
|
|
|
|
EM(yfs_FS_FlushCPS, "YFS.FlushCPS") \
|
|
|
|
EM(yfs_FS_FetchOpaqueACL, "YFS.FetchOpaqueACL") \
|
|
|
|
EM(yfs_FS_WhoAmI, "YFS.WhoAmI") \
|
|
|
|
EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \
|
|
|
|
EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \
|
|
|
|
EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \
|
|
|
|
EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \
|
|
|
|
EM(yfs_FS_FetchData64, "YFS.FetchData64") \
|
|
|
|
EM(yfs_FS_StoreData64, "YFS.StoreData64") \
|
|
|
|
E_(yfs_FS_UpdateSymlink, "YFS.UpdateSymlink")
|
2017-11-02 15:27:51 +00:00
|
|
|
|
|
|
|
#define afs_vl_operations \
|
|
|
|
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
|
|
|
|
EM(afs_VL_GetAddrsU, "VL.GetAddrsU") \
|
|
|
|
EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
|
2020-04-29 17:26:41 +01:00
|
|
|
EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \
|
2017-11-02 15:27:51 +00:00
|
|
|
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
|
|
|
|
|
2021-06-15 11:57:26 +01:00
|
|
|
#define afs_cm_operations \
|
|
|
|
EM(afs_CB_CallBack, "CB.CallBack") \
|
|
|
|
EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \
|
|
|
|
EM(afs_CB_Probe, "CB.Probe") \
|
|
|
|
EM(afs_CB_GetLock, "CB.GetLock") \
|
|
|
|
EM(afs_CB_GetCE, "CB.GetCE") \
|
|
|
|
EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \
|
|
|
|
EM(afs_CB_GetXStats, "CB.GetXStats") \
|
|
|
|
EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \
|
|
|
|
E_(afs_CB_ProbeUuid, "CB.ProbeUuid")
|
|
|
|
|
|
|
|
#define yfs_cm_operations \
|
|
|
|
EM(yfs_CB_Probe, "YFSCB.Probe") \
|
|
|
|
EM(yfs_CB_GetLock, "YFSCB.GetLock") \
|
|
|
|
EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \
|
|
|
|
EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \
|
|
|
|
EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \
|
|
|
|
EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \
|
|
|
|
EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \
|
|
|
|
EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \
|
|
|
|
EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \
|
|
|
|
EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \
|
|
|
|
EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \
|
|
|
|
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
|
|
|
|
E_(yfs_CB_CallBack, "YFSCB.CallBack")
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
#define afs_cb_promise_traces \
|
|
|
|
EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \
|
|
|
|
EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \
|
|
|
|
EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \
|
|
|
|
EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \
|
|
|
|
EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \
|
|
|
|
EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \
|
|
|
|
EM(afs_cb_promise_set_new_inode, "SET new-inode") \
|
|
|
|
E_(afs_cb_promise_set_new_symlink, "SET new-symlink")
|
|
|
|
|
|
|
|
#define afs_vnode_invalid_traces \
|
|
|
|
EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \
|
|
|
|
EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \
|
|
|
|
EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \
|
|
|
|
EM(afs_vnode_invalid_trace_expired, "expired") \
|
|
|
|
EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \
|
|
|
|
EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \
|
|
|
|
EM(afs_vnode_invalid_trace_zap_data, "zap-data") \
|
|
|
|
E_(afs_vnode_valid_trace, "valid")
|
|
|
|
|
|
|
|
#define afs_dir_invalid_traces \
|
|
|
|
EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \
|
|
|
|
EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \
|
|
|
|
EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \
|
|
|
|
EM(afs_dir_invalid_edit_get_block, "edit-get-block") \
|
2024-12-16 20:41:18 +00:00
|
|
|
EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \
|
2024-12-16 20:41:07 +00:00
|
|
|
EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \
|
|
|
|
EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \
|
|
|
|
EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \
|
|
|
|
EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \
|
|
|
|
EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \
|
|
|
|
EM(afs_dir_invalid_inval_folio, "inv-folio") \
|
|
|
|
EM(afs_dir_invalid_iter_stale, "iter-stale") \
|
|
|
|
EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \
|
|
|
|
EM(afs_dir_invalid_release_folio, "rel-folio") \
|
|
|
|
EM(afs_dir_invalid_remote, "remote") \
|
|
|
|
E_(afs_dir_invalid_subdir_removed, "subdir-removed")
|
|
|
|
|
2018-04-06 14:17:25 +01:00
|
|
|
#define afs_edit_dir_ops \
|
|
|
|
EM(afs_edit_dir_create, "create") \
|
|
|
|
EM(afs_edit_dir_create_error, "c_fail") \
|
|
|
|
EM(afs_edit_dir_create_inval, "c_invl") \
|
|
|
|
EM(afs_edit_dir_create_nospc, "c_nspc") \
|
|
|
|
EM(afs_edit_dir_delete, "delete") \
|
|
|
|
EM(afs_edit_dir_delete_error, "d_err ") \
|
|
|
|
EM(afs_edit_dir_delete_inval, "d_invl") \
|
2024-10-23 11:40:10 +01:00
|
|
|
EM(afs_edit_dir_delete_noent, "d_nent") \
|
2024-12-16 20:41:18 +00:00
|
|
|
EM(afs_edit_dir_mkdir, "mk_ent") \
|
2024-10-23 11:40:10 +01:00
|
|
|
EM(afs_edit_dir_update_dd, "u_ddot") \
|
|
|
|
EM(afs_edit_dir_update_error, "u_fail") \
|
|
|
|
EM(afs_edit_dir_update_inval, "u_invl") \
|
|
|
|
E_(afs_edit_dir_update_nodd, "u_nodd")
|
2018-04-06 14:17:25 +01:00
|
|
|
|
|
|
|
#define afs_edit_dir_reasons \
|
|
|
|
EM(afs_edit_dir_for_create, "Create") \
|
|
|
|
EM(afs_edit_dir_for_link, "Link ") \
|
|
|
|
EM(afs_edit_dir_for_mkdir, "MkDir ") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(afs_edit_dir_for_rename_0, "Renam0") \
|
|
|
|
EM(afs_edit_dir_for_rename_1, "Renam1") \
|
|
|
|
EM(afs_edit_dir_for_rename_2, "Renam2") \
|
2024-10-23 11:40:10 +01:00
|
|
|
EM(afs_edit_dir_for_rename_sub, "RnmSub") \
|
2018-04-06 14:17:25 +01:00
|
|
|
EM(afs_edit_dir_for_rmdir, "RmDir ") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(afs_edit_dir_for_silly_0, "S_Ren0") \
|
|
|
|
EM(afs_edit_dir_for_silly_1, "S_Ren1") \
|
2018-04-06 14:17:25 +01:00
|
|
|
EM(afs_edit_dir_for_symlink, "Symlnk") \
|
|
|
|
E_(afs_edit_dir_for_unlink, "Unlink")
|
|
|
|
|
2018-10-20 00:57:56 +01:00
|
|
|
#define afs_eproto_causes \
|
|
|
|
EM(afs_eproto_bad_status, "BadStatus") \
|
|
|
|
EM(afs_eproto_cb_count, "CbCount") \
|
|
|
|
EM(afs_eproto_cb_fid_count, "CbFidCount") \
|
2020-04-29 17:26:41 +01:00
|
|
|
EM(afs_eproto_cellname_len, "CellNameLen") \
|
2018-10-20 00:57:56 +01:00
|
|
|
EM(afs_eproto_file_type, "FileTYpe") \
|
|
|
|
EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
|
|
|
|
EM(afs_eproto_ibulkst_count, "IBS.FidCount") \
|
|
|
|
EM(afs_eproto_motd_len, "MotdLen") \
|
|
|
|
EM(afs_eproto_offline_msg_len, "OfflineMsgLen") \
|
|
|
|
EM(afs_eproto_volname_len, "VolNameLen") \
|
|
|
|
EM(afs_eproto_yvl_fsendpt4_len, "YVL.FsEnd4Len") \
|
|
|
|
EM(afs_eproto_yvl_fsendpt6_len, "YVL.FsEnd6Len") \
|
|
|
|
EM(afs_eproto_yvl_fsendpt_num, "YVL.FsEndCount") \
|
|
|
|
EM(afs_eproto_yvl_fsendpt_type, "YVL.FsEndType") \
|
|
|
|
EM(afs_eproto_yvl_vlendpt4_len, "YVL.VlEnd4Len") \
|
|
|
|
EM(afs_eproto_yvl_vlendpt6_len, "YVL.VlEnd6Len") \
|
|
|
|
E_(afs_eproto_yvl_vlendpt_type, "YVL.VlEndType")
|
|
|
|
|
2018-10-20 00:57:57 +01:00
|
|
|
#define afs_io_errors \
|
|
|
|
EM(afs_io_error_cm_reply, "CM_REPLY") \
|
|
|
|
EM(afs_io_error_extract, "EXTRACT") \
|
|
|
|
EM(afs_io_error_fs_probe_fail, "FS_PROBE_FAIL") \
|
2018-10-20 00:57:59 +01:00
|
|
|
EM(afs_io_error_vl_lookup_fail, "VL_LOOKUP_FAIL") \
|
|
|
|
E_(afs_io_error_vl_probe_fail, "VL_PROBE_FAIL")
|
2018-10-20 00:57:57 +01:00
|
|
|
|
|
|
|
#define afs_file_errors \
|
|
|
|
EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
|
|
|
|
EM(afs_file_error_dir_big, "DIR_BIG") \
|
|
|
|
EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
|
2020-12-23 10:39:57 +00:00
|
|
|
EM(afs_file_error_dir_name_too_long, "DIR_NAME_TOO_LONG") \
|
2018-10-20 00:57:57 +01:00
|
|
|
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
|
|
|
|
EM(afs_file_error_dir_small, "DIR_SMALL") \
|
|
|
|
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
|
2024-12-16 20:41:12 +00:00
|
|
|
EM(afs_file_error_symlink_big, "SYM_BIG") \
|
2018-10-20 00:57:57 +01:00
|
|
|
EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
|
|
|
|
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
|
2017-11-02 15:27:51 +00:00
|
|
|
|
2019-04-25 14:26:50 +01:00
|
|
|
#define afs_flock_types \
|
|
|
|
EM(F_RDLCK, "RDLCK") \
|
|
|
|
EM(F_WRLCK, "WRLCK") \
|
|
|
|
E_(F_UNLCK, "UNLCK")
|
|
|
|
|
|
|
|
#define afs_flock_states \
|
|
|
|
EM(AFS_VNODE_LOCK_NONE, "NONE") \
|
|
|
|
EM(AFS_VNODE_LOCK_WAITING_FOR_CB, "WAIT_FOR_CB") \
|
|
|
|
EM(AFS_VNODE_LOCK_SETTING, "SETTING") \
|
|
|
|
EM(AFS_VNODE_LOCK_GRANTED, "GRANTED") \
|
|
|
|
EM(AFS_VNODE_LOCK_EXTENDING, "EXTENDING") \
|
|
|
|
EM(AFS_VNODE_LOCK_NEED_UNLOCK, "NEED_UNLOCK") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(AFS_VNODE_LOCK_UNLOCKING, "UNLOCKING") \
|
|
|
|
E_(AFS_VNODE_LOCK_DELETED, "DELETED")
|
2019-04-25 14:26:50 +01:00
|
|
|
|
|
|
|
#define afs_flock_events \
|
|
|
|
EM(afs_flock_acquired, "Acquired") \
|
|
|
|
EM(afs_flock_callback_break, "Callback") \
|
|
|
|
EM(afs_flock_defer_unlock, "D-Unlock") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(afs_flock_extend_fail, "Ext_Fail") \
|
2019-04-25 14:26:50 +01:00
|
|
|
EM(afs_flock_fail_other, "ErrOther") \
|
|
|
|
EM(afs_flock_fail_perm, "ErrPerm ") \
|
|
|
|
EM(afs_flock_no_lockers, "NoLocker") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(afs_flock_release_fail, "Rel_Fail") \
|
2019-04-25 14:26:51 +01:00
|
|
|
EM(afs_flock_silly_delete, "SillyDel") \
|
2019-04-25 14:26:50 +01:00
|
|
|
EM(afs_flock_timestamp, "Timestmp") \
|
|
|
|
EM(afs_flock_try_to_lock, "TryToLck") \
|
|
|
|
EM(afs_flock_vfs_lock, "VFSLock ") \
|
|
|
|
EM(afs_flock_vfs_locking, "VFSLking") \
|
|
|
|
EM(afs_flock_waited, "Waited ") \
|
|
|
|
EM(afs_flock_waiting, "Waiting ") \
|
|
|
|
EM(afs_flock_work_extending, "Extendng") \
|
|
|
|
EM(afs_flock_work_retry, "Retry ") \
|
|
|
|
EM(afs_flock_work_unlocking, "Unlcking") \
|
|
|
|
E_(afs_flock_would_block, "EWOULDBL")
|
|
|
|
|
|
|
|
#define afs_flock_operations \
|
|
|
|
EM(afs_flock_op_copy_lock, "COPY ") \
|
|
|
|
EM(afs_flock_op_flock, "->flock ") \
|
|
|
|
EM(afs_flock_op_grant, "GRANT ") \
|
|
|
|
EM(afs_flock_op_lock, "->lock ") \
|
|
|
|
EM(afs_flock_op_release_lock, "RELEASE ") \
|
|
|
|
EM(afs_flock_op_return_ok, "<-OK ") \
|
|
|
|
EM(afs_flock_op_return_edeadlk, "<-EDEADL") \
|
|
|
|
EM(afs_flock_op_return_eagain, "<-EAGAIN") \
|
|
|
|
EM(afs_flock_op_return_error, "<-ERROR ") \
|
|
|
|
EM(afs_flock_op_set_lock, "SET ") \
|
|
|
|
EM(afs_flock_op_unlock, "UNLOCK ") \
|
|
|
|
E_(afs_flock_op_wake, "WAKE ")
|
|
|
|
|
2019-06-20 18:12:16 +01:00
|
|
|
/*
 * Callback-break reason strings: one EM()/E_() pair per enum
 * afs_cb_break_reason value, mapping the constant to the label printed
 * in trace output.
 */
#define afs_cb_break_reasons \
	EM(afs_cb_break_no_break,		"no-break") \
	EM(afs_cb_break_for_callback,		"break-cb") \
	EM(afs_cb_break_for_creation_regress,	"creation-regress") \
	EM(afs_cb_break_for_deleted,		"break-del") \
	EM(afs_cb_break_for_s_reinit,		"s-reinit") \
	EM(afs_cb_break_for_unlink,		"break-unlink") \
	EM(afs_cb_break_for_update_regress,	"update-regress") \
	EM(afs_cb_break_for_volume_callback,	"break-v-cb") \
	EM(afs_cb_break_for_vos_release,	"break-vos-release") \
	E_(afs_cb_break_volume_excluded,	"vol-excluded")
|
2019-06-20 18:12:16 +01:00
|
|
|
|
2023-10-18 09:24:01 +01:00
|
|
|
/*
 * Server-rotation trace strings: one EM()/E_() pair per enum
 * afs_rotate_trace value, with 6-char labels for aligned trace output.
 */
#define afs_rotate_traces \
	EM(afs_rotate_trace_aborted,		"Abortd") \
	EM(afs_rotate_trace_busy_sleep,		"BsySlp") \
	EM(afs_rotate_trace_check_vol_status,	"VolStt") \
	EM(afs_rotate_trace_failed,		"Failed") \
	EM(afs_rotate_trace_iter,		"Iter  ") \
	EM(afs_rotate_trace_iterate_addr,	"ItAddr") \
	EM(afs_rotate_trace_next_server,	"NextSv") \
	EM(afs_rotate_trace_no_more_servers,	"NoMore") \
	EM(afs_rotate_trace_nomem,		"Nomem ") \
	EM(afs_rotate_trace_probe_error,	"PrbErr") \
	EM(afs_rotate_trace_probe_fileserver,	"PrbFsv") \
	EM(afs_rotate_trace_probe_none,		"PrbNon") \
	EM(afs_rotate_trace_probe_response,	"PrbRsp") \
	EM(afs_rotate_trace_probe_superseded,	"PrbSup") \
	EM(afs_rotate_trace_restart,		"Rstart") \
	EM(afs_rotate_trace_retry_server,	"RtrySv") \
	EM(afs_rotate_trace_selected_server,	"SlctSv") \
	EM(afs_rotate_trace_stale_lock,		"StlLck") \
	EM(afs_rotate_trace_start,		"Start ") \
	EM(afs_rotate_trace_stop,		"Stop  ") \
	E_(afs_rotate_trace_stopped,		"Stoppd")
|
|
|
|
|
2023-02-23 15:24:24 +00:00
|
|
|
/*
|
|
|
|
* Generate enums for tracing information.
|
|
|
|
*/
|
|
|
|
#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
|
|
|
|
#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
|
|
|
|
|
|
|
|
#undef EM
|
|
|
|
#undef E_
|
|
|
|
#define EM(a, b) a,
|
|
|
|
#define E_(a, b) a
|
|
|
|
|
2023-10-19 13:59:03 +01:00
|
|
|
enum afs_alist_trace { afs_alist_traces } __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_call_trace { afs_call_traces } __mode(byte);
|
|
|
|
enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
|
2024-12-16 20:41:07 +00:00
|
|
|
enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_cell_trace { afs_cell_traces } __mode(byte);
|
2024-12-16 20:41:07 +00:00
|
|
|
enum afs_dir_invalid_trace { afs_dir_invalid_traces} __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
|
|
|
|
enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
|
|
|
|
enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
|
2023-10-31 16:30:37 +00:00
|
|
|
enum afs_estate_trace { afs_estate_traces } __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_file_error { afs_file_errors } __mode(byte);
|
|
|
|
enum afs_flock_event { afs_flock_events } __mode(byte);
|
|
|
|
enum afs_flock_operation { afs_flock_operations } __mode(byte);
|
|
|
|
enum afs_io_error { afs_io_errors } __mode(byte);
|
2023-10-18 09:24:01 +01:00
|
|
|
enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_server_trace { afs_server_traces } __mode(byte);
|
2024-12-16 20:41:07 +00:00
|
|
|
enum afs_vnode_invalid_trace { afs_vnode_invalid_traces} __mode(byte);
|
2023-02-23 15:24:24 +00:00
|
|
|
enum afs_volume_trace { afs_volume_traces } __mode(byte);
|
|
|
|
|
|
|
|
#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
|
|
|
|
|
2017-01-05 10:38:36 +00:00
|
|
|
/*
 * Export enum symbols via userspace.
 *
 * EM()/E_() expand each list entry to TRACE_DEFINE_ENUM(), making the
 * enum constant values visible to userspace trace tooling.
 */
#undef EM
#undef E_
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);

afs_alist_traces;
afs_call_traces;
afs_cb_break_reasons;
afs_cb_promise_traces;
afs_cell_traces;
afs_cm_operations;
afs_dir_invalid_traces;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
afs_estate_traces;
afs_file_errors;
afs_flock_operations;
afs_flock_types;
afs_fs_operations;
afs_io_errors;
afs_rotate_traces;
afs_server_traces;
afs_vnode_invalid_traces;
afs_vl_operations;
yfs_cm_operations;
|
2017-01-05 10:38:36 +00:00
|
|
|
|
|
|
|
/*
 * Now redefine the EM() and E_() macros to map the enums to the strings that
 * will be printed in the output.
 *
 * Each entry becomes a { value, string } initializer suitable for
 * __print_symbolic()'s lookup table.
 */
#undef EM
#undef E_
#define EM(a, b)	{ a, b },
#define E_(a, b)	{ a, b }
|
|
|
|
|
2018-10-20 00:57:56 +01:00
|
|
|
TRACE_EVENT(afs_receive_data,
|
|
|
|
TP_PROTO(struct afs_call *call, struct iov_iter *iter,
|
2017-01-05 10:38:34 +00:00
|
|
|
bool want_more, int ret),
|
|
|
|
|
2018-10-20 00:57:56 +01:00
|
|
|
TP_ARGS(call, iter, want_more, ret),
|
2017-01-05 10:38:34 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(loff_t, remain)
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_call_state, state)
|
|
|
|
__field(unsigned short, unmarshall)
|
|
|
|
__field(bool, want_more)
|
|
|
|
__field(int, ret)
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->state = call->state;
|
|
|
|
__entry->unmarshall = call->unmarshall;
|
2018-10-20 00:57:56 +01:00
|
|
|
__entry->remain = iov_iter_count(iter);
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->want_more = want_more;
|
|
|
|
__entry->ret = ret;
|
|
|
|
),
|
|
|
|
|
2018-10-20 00:57:56 +01:00
|
|
|
TP_printk("c=%08x r=%llu u=%u w=%u s=%u ret=%d",
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->call,
|
2018-10-20 00:57:56 +01:00
|
|
|
__entry->remain,
|
|
|
|
__entry->unmarshall,
|
|
|
|
__entry->want_more,
|
|
|
|
__entry->state,
|
|
|
|
__entry->ret)
|
2017-01-05 10:38:34 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_notify_call,
|
|
|
|
TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(rxcall, call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_call_state, state)
|
|
|
|
__field(unsigned short, unmarshall)
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->state = call->state;
|
|
|
|
__entry->unmarshall = call->unmarshall;
|
|
|
|
),
|
|
|
|
|
2018-03-27 23:03:00 +01:00
|
|
|
TP_printk("c=%08x s=%u u=%u",
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->call,
|
|
|
|
__entry->state, __entry->unmarshall)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_cb_call,
|
|
|
|
TP_PROTO(struct afs_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(u32, op)
|
|
|
|
__field(u16, service_id)
|
2025-04-11 10:52:56 +01:00
|
|
|
__field(u8, security_ix)
|
|
|
|
__field(u32, enctype)
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->op = call->operation_ID;
|
2021-06-15 11:57:26 +01:00
|
|
|
__entry->service_id = call->service_id;
|
2025-04-11 10:52:56 +01:00
|
|
|
__entry->security_ix = call->security_ix;
|
|
|
|
__entry->enctype = call->enctype;
|
2017-01-05 10:38:34 +00:00
|
|
|
),
|
|
|
|
|
2025-04-11 10:52:56 +01:00
|
|
|
TP_printk("c=%08x %s sv=%u sx=%u en=%u",
|
2017-01-05 10:38:34 +00:00
|
|
|
__entry->call,
|
2021-06-15 11:57:26 +01:00
|
|
|
__entry->service_id == 2501 ?
|
|
|
|
__print_symbolic(__entry->op, yfs_cm_operations) :
|
2025-04-11 10:52:56 +01:00
|
|
|
__print_symbolic(__entry->op, afs_cm_operations),
|
|
|
|
__entry->service_id,
|
|
|
|
__entry->security_ix,
|
|
|
|
__entry->enctype)
|
2017-01-05 10:38:34 +00:00
|
|
|
);
|
|
|
|
|
2017-01-05 10:38:36 +00:00
|
|
|
TRACE_EVENT(afs_call,
|
2022-07-06 11:26:14 +01:00
|
|
|
TP_PROTO(unsigned int call_debug_id, enum afs_call_trace op,
|
2022-07-06 10:52:14 +01:00
|
|
|
int ref, int outstanding, const void *where),
|
2017-01-05 10:38:36 +00:00
|
|
|
|
2022-07-06 11:26:14 +01:00
|
|
|
TP_ARGS(call_debug_id, op, ref, outstanding, where),
|
2017-01-05 10:38:36 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(int, op)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(int, outstanding)
|
|
|
|
__field(const void *, where)
|
2017-01-05 10:38:36 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-07-06 11:26:14 +01:00
|
|
|
__entry->call = call_debug_id;
|
2017-01-05 10:38:36 +00:00
|
|
|
__entry->op = op;
|
2022-07-06 10:52:14 +01:00
|
|
|
__entry->ref = ref;
|
2017-01-05 10:38:36 +00:00
|
|
|
__entry->outstanding = outstanding;
|
|
|
|
__entry->where = where;
|
|
|
|
),
|
|
|
|
|
2022-07-06 10:52:14 +01:00
|
|
|
TP_printk("c=%08x %s r=%d o=%d sp=%pSR",
|
2017-01-05 10:38:36 +00:00
|
|
|
__entry->call,
|
|
|
|
__print_symbolic(__entry->op, afs_call_traces),
|
2022-07-06 10:52:14 +01:00
|
|
|
__entry->ref,
|
2017-01-05 10:38:36 +00:00
|
|
|
__entry->outstanding,
|
|
|
|
__entry->where)
|
|
|
|
);
|
|
|
|
|
2017-11-02 15:27:51 +00:00
|
|
|
TRACE_EVENT(afs_make_fs_call,
|
|
|
|
TP_PROTO(struct afs_call *call, const struct afs_fid *fid),
|
|
|
|
|
|
|
|
TP_ARGS(call, fid),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_fs_operation, op)
|
|
|
|
__field_struct(struct afs_fid, fid)
|
2017-11-02 15:27:51 +00:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2018-03-27 23:03:00 +01:00
|
|
|
__entry->call = call->debug_id;
|
2017-11-02 15:27:51 +00:00
|
|
|
__entry->op = call->operation_ID;
|
|
|
|
if (fid) {
|
|
|
|
__entry->fid = *fid;
|
|
|
|
} else {
|
|
|
|
__entry->fid.vid = 0;
|
|
|
|
__entry->fid.vnode = 0;
|
|
|
|
__entry->fid.unique = 0;
|
|
|
|
}
|
|
|
|
),
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
TP_printk("c=%08x V=%llx i=%llx:%x %s",
|
2017-11-02 15:27:51 +00:00
|
|
|
__entry->call,
|
|
|
|
__entry->fid.vid,
|
|
|
|
__entry->fid.vnode,
|
|
|
|
__entry->fid.unique,
|
|
|
|
__print_symbolic(__entry->op, afs_fs_operations))
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:52 +01:00
|
|
|
TRACE_EVENT(afs_make_fs_calli,
|
|
|
|
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
|
|
|
|
unsigned int i),
|
|
|
|
|
|
|
|
TP_ARGS(call, fid, i),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned int, i)
|
|
|
|
__field(enum afs_fs_operation, op)
|
|
|
|
__field_struct(struct afs_fid, fid)
|
2019-04-25 14:26:52 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->i = i;
|
|
|
|
__entry->op = call->operation_ID;
|
|
|
|
if (fid) {
|
|
|
|
__entry->fid = *fid;
|
|
|
|
} else {
|
|
|
|
__entry->fid.vid = 0;
|
|
|
|
__entry->fid.vnode = 0;
|
|
|
|
__entry->fid.unique = 0;
|
|
|
|
}
|
|
|
|
),
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u",
|
2019-04-25 14:26:52 +01:00
|
|
|
__entry->call,
|
|
|
|
__entry->fid.vid,
|
|
|
|
__entry->fid.vnode,
|
|
|
|
__entry->fid.unique,
|
|
|
|
__print_symbolic(__entry->op, afs_fs_operations),
|
|
|
|
__entry->i)
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:51 +01:00
|
|
|
TRACE_EVENT(afs_make_fs_call1,
|
|
|
|
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
|
afs: Build an abstraction around an "operation" concept
Turn the afs_operation struct into the main way that most fileserver
operations are managed. Various things are added to the struct, including
the following:
(1) All the parameters and results of the relevant operations are moved
into it, removing corresponding fields from the afs_call struct.
afs_call gets a pointer to the op.
(2) The target volume is made the main focus of the operation, rather than
the target vnode(s), and a bunch of op->vnode->volume are made
op->volume instead.
(3) Two vnode records are defined (op->file[]) for the vnode(s) involved
in most operations. The vnode record (struct afs_vnode_param)
contains:
- The vnode pointer.
- The fid of the vnode to be included in the parameters or that was
returned in the reply (eg. FS.MakeDir).
- The status and callback information that may be returned in the
reply about the vnode.
- Callback break and data version tracking for detecting
simultaneous third-parth changes.
(4) Pointers to dentries to be updated with new inodes.
(5) An operations table pointer. The table includes pointers to functions
for issuing AFS and YFS-variant RPCs, handling the success and abort
of an operation and handling post-I/O-lock local editing of a
directory.
To make this work, the following function restructuring is made:
(A) The rotation loop that issues calls to fileservers that can be found
in each function that wants to issue an RPC (such as afs_mkdir()) is
extracted out into common code, in a new file called fs_operation.c.
(B) The rotation loops, such as the one in afs_mkdir(), are replaced with
a much smaller piece of code that allocates an operation, sets the
parameters and then calls out to the common code to do the actual
work.
(C) The code for handling the success and failure of an operation are
moved into operation functions (as (5) above) and these are called
from the core code at appropriate times.
(D) The pseudo inode getting stuff used by the dynamic root code is moved
over into dynroot.c.
(E) struct afs_iget_data is absorbed into the operation struct and
afs_iget() expects to be given an op pointer and a vnode record.
(F) Point (E) doesn't work for the root dir of a volume, but we know the
FID in advance (it's always vnode 1, unique 1), so a separate inode
getter, afs_root_iget(), is provided to special-case that.
(G) The inode status init/update functions now also take an op and a vnode
record.
(H) The RPC marshalling functions now, for the most part, just take an
afs_operation struct as their only argument. All the data they need
is held there. The result delivery functions write their answers
there as well.
(I) The call is attached to the operation and then the operation core does
the waiting.
And then the new operation code is, for the moment, made to just initialise
the operation, get the appropriate vnode I/O locks and do the same rotation
loop as before.
This lays the foundation for the following changes in the future:
(*) Overhauling the rotation (again).
(*) Support for asynchronous I/O, where the fileserver rotation must be
done asynchronously also.
Signed-off-by: David Howells <dhowells@redhat.com>
2020-04-10 20:51:51 +01:00
|
|
|
const struct qstr *name),
|
2019-04-25 14:26:51 +01:00
|
|
|
|
|
|
|
TP_ARGS(call, fid, name),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_fs_operation, op)
|
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__array(char, name, 24)
|
2019-04-25 14:26:51 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
afs: Build an abstraction around an "operation" concept
Turn the afs_operation struct into the main way that most fileserver
operations are managed. Various things are added to the struct, including
the following:
(1) All the parameters and results of the relevant operations are moved
into it, removing corresponding fields from the afs_call struct.
afs_call gets a pointer to the op.
(2) The target volume is made the main focus of the operation, rather than
the target vnode(s), and a bunch of op->vnode->volume are made
op->volume instead.
(3) Two vnode records are defined (op->file[]) for the vnode(s) involved
in most operations. The vnode record (struct afs_vnode_param)
contains:
- The vnode pointer.
- The fid of the vnode to be included in the parameters or that was
returned in the reply (eg. FS.MakeDir).
- The status and callback information that may be returned in the
reply about the vnode.
- Callback break and data version tracking for detecting
simultaneous third-parth changes.
(4) Pointers to dentries to be updated with new inodes.
(5) An operations table pointer. The table includes pointers to functions
for issuing AFS and YFS-variant RPCs, handling the success and abort
of an operation and handling post-I/O-lock local editing of a
directory.
To make this work, the following function restructuring is made:
(A) The rotation loop that issues calls to fileservers that can be found
in each function that wants to issue an RPC (such as afs_mkdir()) is
extracted out into common code, in a new file called fs_operation.c.
(B) The rotation loops, such as the one in afs_mkdir(), are replaced with
a much smaller piece of code that allocates an operation, sets the
parameters and then calls out to the common code to do the actual
work.
(C) The code for handling the success and failure of an operation are
moved into operation functions (as (5) above) and these are called
from the core code at appropriate times.
(D) The pseudo inode getting stuff used by the dynamic root code is moved
over into dynroot.c.
(E) struct afs_iget_data is absorbed into the operation struct and
afs_iget() expects to be given an op pointer and a vnode record.
(F) Point (E) doesn't work for the root dir of a volume, but we know the
FID in advance (it's always vnode 1, unique 1), so a separate inode
getter, afs_root_iget(), is provided to special-case that.
(G) The inode status init/update functions now also take an op and a vnode
record.
(H) The RPC marshalling functions now, for the most part, just take an
afs_operation struct as their only argument. All the data they need
is held there. The result delivery functions write their answers
there as well.
(I) The call is attached to the operation and then the operation core does
the waiting.
And then the new operation code is, for the moment, made to just initialise
the operation, get the appropriate vnode I/O locks and do the same rotation
loop as before.
This lays the foundation for the following changes in the future:
(*) Overhauling the rotation (again).
(*) Support for asynchronous I/O, where the fileserver rotation must be
done asynchronously also.
Signed-off-by: David Howells <dhowells@redhat.com>
2020-04-10 20:51:51 +01:00
|
|
|
unsigned int __len = min_t(unsigned int, name->len, 23);
|
2019-04-25 14:26:51 +01:00
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->op = call->operation_ID;
|
|
|
|
if (fid) {
|
|
|
|
__entry->fid = *fid;
|
|
|
|
} else {
|
|
|
|
__entry->fid.vid = 0;
|
|
|
|
__entry->fid.vnode = 0;
|
|
|
|
__entry->fid.unique = 0;
|
|
|
|
}
|
afs: Build an abstraction around an "operation" concept
Turn the afs_operation struct into the main way that most fileserver
operations are managed. Various things are added to the struct, including
the following:
(1) All the parameters and results of the relevant operations are moved
into it, removing corresponding fields from the afs_call struct.
afs_call gets a pointer to the op.
(2) The target volume is made the main focus of the operation, rather than
the target vnode(s), and a bunch of op->vnode->volume are made
op->volume instead.
(3) Two vnode records are defined (op->file[]) for the vnode(s) involved
in most operations. The vnode record (struct afs_vnode_param)
contains:
- The vnode pointer.
- The fid of the vnode to be included in the parameters or that was
returned in the reply (eg. FS.MakeDir).
- The status and callback information that may be returned in the
reply about the vnode.
- Callback break and data version tracking for detecting
simultaneous third-parth changes.
(4) Pointers to dentries to be updated with new inodes.
(5) An operations table pointer. The table includes pointers to functions
for issuing AFS and YFS-variant RPCs, handling the success and abort
of an operation and handling post-I/O-lock local editing of a
directory.
To make this work, the following function restructuring is made:
(A) The rotation loop that issues calls to fileservers that can be found
in each function that wants to issue an RPC (such as afs_mkdir()) is
extracted out into common code, in a new file called fs_operation.c.
(B) The rotation loops, such as the one in afs_mkdir(), are replaced with
a much smaller piece of code that allocates an operation, sets the
parameters and then calls out to the common code to do the actual
work.
(C) The code for handling the success and failure of an operation are
moved into operation functions (as (5) above) and these are called
from the core code at appropriate times.
(D) The pseudo inode getting stuff used by the dynamic root code is moved
over into dynroot.c.
(E) struct afs_iget_data is absorbed into the operation struct and
afs_iget() expects to be given an op pointer and a vnode record.
(F) Point (E) doesn't work for the root dir of a volume, but we know the
FID in advance (it's always vnode 1, unique 1), so a separate inode
getter, afs_root_iget(), is provided to special-case that.
(G) The inode status init/update functions now also take an op and a vnode
record.
(H) The RPC marshalling functions now, for the most part, just take an
afs_operation struct as their only argument. All the data they need
is held there. The result delivery functions write their answers
there as well.
(I) The call is attached to the operation and then the operation core does
the waiting.
And then the new operation code is, for the moment, made to just initialise
the operation, get the appropriate vnode I/O locks and do the same rotation
loop as before.
This lays the foundation for the following changes in the future:
(*) Overhauling the rotation (again).
(*) Support for asynchronous I/O, where the fileserver rotation must be
done asynchronously also.
Signed-off-by: David Howells <dhowells@redhat.com>
2020-04-10 20:51:51 +01:00
|
|
|
memcpy(__entry->name, name->name, __len);
|
2019-04-25 14:26:51 +01:00
|
|
|
__entry->name[__len] = 0;
|
|
|
|
),
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"",
|
2019-04-25 14:26:51 +01:00
|
|
|
__entry->call,
|
|
|
|
__entry->fid.vid,
|
|
|
|
__entry->fid.vnode,
|
|
|
|
__entry->fid.unique,
|
|
|
|
__print_symbolic(__entry->op, afs_fs_operations),
|
|
|
|
__entry->name)
|
|
|
|
);
|
|
|
|
|
|
|
|
/*
 * Log the issuing of a fileserver RPC that carries two name strings
 * (presumably operations such as rename — TODO confirm against callers).
 * Records the call debug ID, the FS operation number, the target FID (or a
 * zeroed FID if none was supplied) and truncated copies of both names.
 */
TRACE_EVENT(afs_make_fs_call2,
	    TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
		     const struct qstr *name, const struct qstr *name2),

	    TP_ARGS(call, fid, name, name2),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(enum afs_fs_operation,	op)
		    __field_struct(struct afs_fid,	fid)
		    /* Names are captured into fixed 24-byte buffers
		     * (23 chars + NUL), so long names are truncated. */
		    __array(char,			name, 24)
		    __array(char,			name2, 24)
			     ),

	    TP_fast_assign(
		    /* Clamp both name lengths to leave room for the NUL. */
		    unsigned int __len = min_t(unsigned int, name->len, 23);
		    unsigned int __len2 = min_t(unsigned int, name2->len, 23);
		    __entry->call = call->debug_id;
		    __entry->op = call->operation_ID;
		    if (fid) {
			    __entry->fid = *fid;
		    } else {
			    /* No target FID supplied: record an all-zero FID. */
			    __entry->fid.vid = 0;
			    __entry->fid.vnode = 0;
			    __entry->fid.unique = 0;
		    }
		    memcpy(__entry->name, name->name, __len);
		    __entry->name[__len] = 0;
		    memcpy(__entry->name2, name2->name, __len2);
		    __entry->name2[__len2] = 0;
			   ),

	    /* c=call debug ID, V=volume ID, i=vnode:uniquifier, then the
	     * symbolic operation name and the two captured names. */
	    TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"",
		      __entry->call,
		      __entry->fid.vid,
		      __entry->fid.vnode,
		      __entry->fid.unique,
		      __print_symbolic(__entry->op, afs_fs_operations),
		      __entry->name,
		      __entry->name2)
	    );
|
|
|
|
|
2017-11-02 15:27:51 +00:00
|
|
|
/*
 * Log the issuing of a volume location (VL) server RPC.  Records the call
 * debug ID and the VL operation number, printed symbolically.
 */
TRACE_EVENT(afs_make_vl_call,
	    TP_PROTO(struct afs_call *call),

	    TP_ARGS(call),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(enum afs_vl_operation,	op)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->op = call->operation_ID;
			   ),

	    TP_printk("c=%08x %s",
		      __entry->call,
		      __print_symbolic(__entry->op, afs_vl_operations))
	    );
|
|
|
|
|
|
|
|
/*
 * Log the completion of an afs_call.  Records the call debug ID, the
 * underlying rxrpc call pointer, the local error result and any remote
 * abort code.
 */
TRACE_EVENT(afs_call_done,
	    TP_PROTO(struct afs_call *call),

	    TP_ARGS(call),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(struct rxrpc_call *,	rx_call)
		    __field(int,			ret)
		    __field(u32,			abort_code)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->rx_call = call->rxcall;
		    __entry->ret = call->error;
		    __entry->abort_code = call->abort_code;
			   ),

	    /* ret is the local error code; ab is the remote abort code;
	     * the trailing pointer identifies the rxrpc call. */
	    TP_printk(" c=%08x ret=%d ab=%d [%p]",
		      __entry->call,
		      __entry->ret,
		      __entry->abort_code,
		      __entry->rx_call)
	    );
|
|
|
|
|
2020-02-06 14:22:28 +00:00
|
|
|
/*
 * Log a request to transmit data for a call.  Records the call debug ID,
 * the msghdr flags and the position/length of the data described by the
 * message iterator.
 */
TRACE_EVENT(afs_send_data,
	    TP_PROTO(struct afs_call *call, struct msghdr *msg),

	    TP_ARGS(call, msg),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(unsigned int,	flags)
		    __field(loff_t,		offset)
		    __field(loff_t,		count)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->flags = msg->msg_flags;
		    /* Absolute byte position within the xarray-backed
		     * iterator supplying the data. */
		    __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
		    __entry->count = iov_iter_count(&msg->msg_iter);
			   ),

	    /* o=offset, n=number of bytes remaining, f=msg flags. */
	    TP_printk(" c=%08x o=%llx n=%llx f=%x",
		      __entry->call, __entry->offset, __entry->count,
		      __entry->flags)
	    );
|
|
|
|
|
2020-02-06 14:22:28 +00:00
|
|
|
/*
 * Log the result of a data transmission for a call.  Counterpart of
 * afs_send_data: records the same iterator position/length plus the
 * return value of the send.
 */
TRACE_EVENT(afs_sent_data,
	    TP_PROTO(struct afs_call *call, struct msghdr *msg, int ret),

	    TP_ARGS(call, msg, ret),

	    TP_STRUCT__entry(
		    __field(unsigned int,	call)
		    __field(int,		ret)
		    __field(loff_t,		offset)
		    __field(loff_t,		count)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->ret = ret;
		    /* Absolute byte position within the xarray-backed
		     * iterator after/at the point of sending. */
		    __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset;
		    __entry->count = iov_iter_count(&msg->msg_iter);
			   ),

	    /* o=offset, n=bytes remaining in the iterator, r=send result. */
	    TP_printk(" c=%08x o=%llx n=%llx r=%x",
		      __entry->call, __entry->offset, __entry->count,
		      __entry->ret)
	    );
|
|
|
|
|
2017-11-02 15:27:52 +00:00
|
|
|
/*
 * Log a failure of a directory-content check.  Records the vnode pointer,
 * the offset at which the check failed and the directory's current i_size
 * for comparison.
 */
TRACE_EVENT(afs_dir_check_failed,
	    TP_PROTO(struct afs_vnode *vnode, loff_t off),

	    TP_ARGS(vnode, off),

	    TP_STRUCT__entry(
		    __field(struct afs_vnode *,	vnode)
		    __field(loff_t,		off)
		    __field(loff_t,		i_size)
			     ),

	    TP_fast_assign(
		    __entry->vnode = vnode;
		    __entry->off = off;
		    /* Sample the inode size at the moment of failure. */
		    __entry->i_size = i_size_read(&vnode->netfs.inode);
			   ),

	    /* Printed as failing-offset/current-size. */
	    TP_printk("vn=%p %llx/%llx",
		      __entry->vnode, __entry->off, __entry->i_size)
	    );
|
|
|
|
|
2017-11-02 15:27:53 +00:00
|
|
|
/*
 * Log an afs_call state transition.  Records the call debug ID, the
 * from/to states, the local error code and any remote abort code in
 * effect at the time of the transition.
 */
TRACE_EVENT(afs_call_state,
	    TP_PROTO(struct afs_call *call,
		     enum afs_call_state from,
		     enum afs_call_state to,
		     int ret, u32 remote_abort),

	    TP_ARGS(call, from, to, ret, remote_abort),

	    TP_STRUCT__entry(
		    __field(unsigned int,		call)
		    __field(enum afs_call_state,	from)
		    __field(enum afs_call_state,	to)
		    __field(int,			ret)
		    __field(u32,			abort)
			     ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
		    __entry->from = from;
		    __entry->to = to;
		    __entry->ret = ret;
		    __entry->abort = remote_abort;
			   ),

	    /* States are printed numerically as from->to. */
	    TP_printk("c=%08x %u->%u r=%d ab=%d",
		      __entry->call,
		      __entry->from, __entry->to,
		      __entry->ret, __entry->abort)
	    );
|
|
|
|
|
2019-04-25 14:26:51 +01:00
|
|
|
/*
 * Log a directory lookup.  Records the parent directory's FID, the name
 * looked up (truncated to 23 chars) and the FID that the lookup resolved
 * to.
 */
TRACE_EVENT(afs_lookup,
	    TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
		     struct afs_fid *fid),

	    TP_ARGS(dvnode, name, fid),

	    TP_STRUCT__entry(
		    __field_struct(struct afs_fid,	dfid)
		    __field_struct(struct afs_fid,	fid)
		    /* 24-byte buffer: 23 name chars + NUL terminator. */
		    __array(char,			name, 24)
			     ),

	    TP_fast_assign(
		    /* Clamp the copied length to leave room for the NUL. */
		    int __len = min_t(int, name->len, 23);
		    __entry->dfid = dvnode->fid;
		    __entry->fid = *fid;
		    memcpy(__entry->name, name->name, __len);
		    __entry->name[__len] = 0;
			   ),

	    /* d=parent dir FID (vid:vnode:unique); f=result vnode:unique
	     * (same volume as the parent, so vid is not repeated). */
	    TP_printk("d=%llx:%llx:%x \"%s\" f=%llx:%x",
		      __entry->dfid.vid, __entry->dfid.vnode, __entry->dfid.unique,
		      __entry->name,
		      __entry->fid.vnode, __entry->fid.unique)
	    );
|
|
|
|
|
2018-04-06 14:17:25 +01:00
|
|
|
TRACE_EVENT(afs_edit_dir,
|
|
|
|
TP_PROTO(struct afs_vnode *dvnode,
|
|
|
|
enum afs_edit_dir_reason why,
|
|
|
|
enum afs_edit_dir_op op,
|
|
|
|
unsigned int block,
|
|
|
|
unsigned int slot,
|
|
|
|
unsigned int f_vnode,
|
|
|
|
unsigned int f_unique,
|
|
|
|
const char *name),
|
|
|
|
|
|
|
|
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(enum afs_edit_dir_reason, why)
|
|
|
|
__field(enum afs_edit_dir_op, op)
|
|
|
|
__field(unsigned int, block)
|
|
|
|
__field(unsigned short, slot)
|
|
|
|
__field(unsigned int, f_vnode)
|
|
|
|
__field(unsigned int, f_unique)
|
|
|
|
__array(char, name, 24)
|
2018-04-06 14:17:25 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
int __len = strlen(name);
|
2019-04-25 14:26:51 +01:00
|
|
|
__len = min(__len, 23);
|
2018-04-06 14:17:25 +01:00
|
|
|
__entry->vnode = dvnode->fid.vnode;
|
|
|
|
__entry->unique = dvnode->fid.unique;
|
|
|
|
__entry->why = why;
|
|
|
|
__entry->op = op;
|
|
|
|
__entry->block = block;
|
|
|
|
__entry->slot = slot;
|
|
|
|
__entry->f_vnode = f_vnode;
|
|
|
|
__entry->f_unique = f_unique;
|
|
|
|
memcpy(__entry->name, name, __len);
|
|
|
|
__entry->name[__len] = 0;
|
|
|
|
),
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"",
|
2018-04-06 14:17:25 +01:00
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__print_symbolic(__entry->why, afs_edit_dir_reasons),
|
|
|
|
__print_symbolic(__entry->op, afs_edit_dir_ops),
|
|
|
|
__entry->block, __entry->slot,
|
|
|
|
__entry->f_vnode, __entry->f_unique,
|
|
|
|
__entry->name)
|
|
|
|
);
|
|
|
|
|
2024-12-16 20:41:07 +00:00
|
|
|
TRACE_EVENT(afs_dir_invalid,
|
|
|
|
TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(dvnode, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(enum afs_dir_invalid_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vnode = dvnode->fid.vnode;
|
|
|
|
__entry->unique = dvnode->fid.unique;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("di=%x:%x %s",
|
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__print_symbolic(__entry->trace, afs_dir_invalid_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_cb_promise,
|
|
|
|
TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(enum afs_cb_promise_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vnode = vnode->fid.vnode;
|
|
|
|
__entry->unique = vnode->fid.unique;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("di=%x:%x %s",
|
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__print_symbolic(__entry->trace, afs_cb_promise_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_vnode_invalid,
|
|
|
|
TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, trace),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(enum afs_vnode_invalid_trace, trace)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vnode = vnode->fid.vnode;
|
|
|
|
__entry->unique = vnode->fid.unique;
|
|
|
|
__entry->trace = trace;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("di=%x:%x %s",
|
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__print_symbolic(__entry->trace, afs_vnode_invalid_traces))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_set_dv,
|
|
|
|
TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv),
|
|
|
|
|
|
|
|
TP_ARGS(dvnode, new_dv),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(u64, old_dv)
|
|
|
|
__field(u64, new_dv)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vnode = dvnode->fid.vnode;
|
|
|
|
__entry->unique = dvnode->fid.unique;
|
|
|
|
__entry->old_dv = dvnode->status.data_version;
|
|
|
|
__entry->new_dv = new_dv;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("di=%x:%x dv=%llx -> dv=%llx",
|
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__entry->old_dv, __entry->new_dv)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_dv_mismatch,
|
|
|
|
TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv),
|
|
|
|
|
|
|
|
TP_ARGS(dvnode, before_dv, delta, new_dv),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, vnode)
|
|
|
|
__field(unsigned int, unique)
|
|
|
|
__field(int, delta)
|
|
|
|
__field(u64, before_dv)
|
|
|
|
__field(u64, new_dv)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vnode = dvnode->fid.vnode;
|
|
|
|
__entry->unique = dvnode->fid.unique;
|
|
|
|
__entry->delta = delta;
|
|
|
|
__entry->before_dv = before_dv;
|
|
|
|
__entry->new_dv = new_dv;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("di=%x:%x xdv=%llx+%d dv=%llx",
|
|
|
|
__entry->vnode, __entry->unique,
|
|
|
|
__entry->before_dv, __entry->delta, __entry->new_dv)
|
|
|
|
);
|
|
|
|
|
2018-04-06 14:17:25 +01:00
|
|
|
TRACE_EVENT(afs_protocol_error,
|
2020-04-08 16:49:08 +01:00
|
|
|
TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
|
2018-04-06 14:17:25 +01:00
|
|
|
|
2020-04-08 16:49:08 +01:00
|
|
|
TP_ARGS(call, cause),
|
2018-04-06 14:17:25 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_eproto_cause, cause)
|
2018-04-06 14:17:25 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call ? call->debug_id : 0;
|
2018-10-20 00:57:56 +01:00
|
|
|
__entry->cause = cause;
|
2018-04-06 14:17:25 +01:00
|
|
|
),
|
|
|
|
|
2020-04-08 16:49:08 +01:00
|
|
|
TP_printk("c=%08x %s",
|
|
|
|
__entry->call,
|
2018-10-20 00:57:56 +01:00
|
|
|
__print_symbolic(__entry->cause, afs_eproto_causes))
|
2018-04-06 14:17:25 +01:00
|
|
|
);
|
|
|
|
|
2018-10-20 00:57:57 +01:00
|
|
|
TRACE_EVENT(afs_io_error,
|
|
|
|
TP_PROTO(unsigned int call, int error, enum afs_io_error where),
|
|
|
|
|
|
|
|
TP_ARGS(call, error, where),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(int, error)
|
|
|
|
__field(enum afs_io_error, where)
|
2018-10-20 00:57:57 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call;
|
|
|
|
__entry->error = error;
|
|
|
|
__entry->where = where;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x r=%d %s",
|
|
|
|
__entry->call, __entry->error,
|
|
|
|
__print_symbolic(__entry->where, afs_io_errors))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_file_error,
|
|
|
|
TP_PROTO(struct afs_vnode *vnode, int error, enum afs_file_error where),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, error, where),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(int, error)
|
|
|
|
__field(enum afs_file_error, where)
|
2018-10-20 00:57:57 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = vnode->fid;
|
|
|
|
__entry->error = error;
|
|
|
|
__entry->where = where;
|
|
|
|
),
|
|
|
|
|
2018-10-20 00:57:57 +01:00
|
|
|
TP_printk("%llx:%llx:%x r=%d %s",
|
2018-10-20 00:57:57 +01:00
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->error,
|
|
|
|
__print_symbolic(__entry->where, afs_file_errors))
|
|
|
|
);
|
|
|
|
|
afs: Fix error handling with lookup via FS.InlineBulkStatus
When afs does a lookup, it tries to use FS.InlineBulkStatus to preemptively
look up a bunch of files in the parent directory and cache this locally, on
the basis that we might want to look at them too (for example if someone
does an ls on a directory, they may want want to then stat every file
listed).
FS.InlineBulkStatus can be considered a compound op with the normal abort
code applying to the compound as a whole. Each status fetch within the
compound is then given its own individual abort code - but assuming no
error that prevents the bulk fetch from returning the compound result will
be 0, even if all the constituent status fetches failed.
At the conclusion of afs_do_lookup(), we should use the abort code from the
appropriate status to determine the error to return, if any - but instead
it is assumed that we were successful if the op as a whole succeeded and we
return an incompletely initialised inode, resulting in ENOENT, no matter
the actual reason. In the particular instance reported, a vnode with no
permission granted to be accessed is being given a UAEACCES abort code
which should be reported as EACCES, but is instead being reported as
ENOENT.
Fix this by abandoning the inode (which will be cleaned up with the op) if
file[1] has an abort code indicated and turn that abort code into an error
instead.
Whilst we're at it, add a tracepoint so that the abort codes of the
individual subrequests of FS.InlineBulkStatus can be logged. At the moment
only the container abort code can be 0.
Fixes: e49c7b2f6de7 ("afs: Build an abstraction around an "operation" concept")
Reported-by: Jeffrey Altman <jaltman@auristor.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2024-01-02 14:02:37 +00:00
|
|
|
TRACE_EVENT(afs_bulkstat_error,
|
|
|
|
TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
|
|
|
|
|
|
|
|
TP_ARGS(op, fid, index, abort),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(unsigned int, op)
|
|
|
|
__field(unsigned int, index)
|
|
|
|
__field(s32, abort)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->op = op->debug_id;
|
|
|
|
__entry->fid = *fid;
|
|
|
|
__entry->index = index;
|
|
|
|
__entry->abort = abort;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
|
|
|
|
__entry->op, __entry->index,
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->abort)
|
|
|
|
);
|
|
|
|
|
2018-05-11 22:59:42 +01:00
|
|
|
TRACE_EVENT(afs_cm_no_server,
|
2025-01-23 11:01:55 +00:00
|
|
|
TP_PROTO(struct afs_call *call, const struct sockaddr_rxrpc *srx),
|
2018-05-11 22:59:42 +01:00
|
|
|
|
|
|
|
TP_ARGS(call, srx),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned int, op_id)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
2018-05-11 22:59:42 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->op_id = call->operation_ID;
|
|
|
|
memcpy(&__entry->srx, srx, sizeof(__entry->srx));
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x op=%u %pISpc",
|
|
|
|
__entry->call, __entry->op_id, &__entry->srx.transport)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_cm_no_server_u,
|
|
|
|
TP_PROTO(struct afs_call *call, const uuid_t *uuid),
|
|
|
|
|
|
|
|
TP_ARGS(call, uuid),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(unsigned int, op_id)
|
|
|
|
__field_struct(uuid_t, uuid)
|
2018-05-11 22:59:42 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->op_id = call->operation_ID;
|
|
|
|
memcpy(&__entry->uuid, uuid, sizeof(__entry->uuid));
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x op=%u %pU",
|
|
|
|
__entry->call, __entry->op_id, &__entry->uuid)
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:50 +01:00
|
|
|
TRACE_EVENT(afs_flock_ev,
|
|
|
|
TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl,
|
|
|
|
enum afs_flock_event event, int error),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, fl, event, error),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(enum afs_flock_event, event)
|
|
|
|
__field(enum afs_lock_state, state)
|
|
|
|
__field(int, error)
|
|
|
|
__field(unsigned int, debug_id)
|
2019-04-25 14:26:50 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = vnode->fid;
|
|
|
|
__entry->event = event;
|
|
|
|
__entry->state = vnode->lock_state;
|
|
|
|
__entry->error = error;
|
|
|
|
__entry->debug_id = fl ? fl->fl_u.afs.debug_id : 0;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx:%llx:%x %04x %s s=%s e=%d",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->debug_id,
|
|
|
|
__print_symbolic(__entry->event, afs_flock_events),
|
|
|
|
__print_symbolic(__entry->state, afs_flock_states),
|
|
|
|
__entry->error)
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_flock_op,
|
|
|
|
TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl,
|
|
|
|
enum afs_flock_operation op),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, fl, op),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(loff_t, from)
|
|
|
|
__field(loff_t, len)
|
|
|
|
__field(enum afs_flock_operation, op)
|
|
|
|
__field(unsigned char, type)
|
|
|
|
__field(unsigned int, flags)
|
|
|
|
__field(unsigned int, debug_id)
|
2019-04-25 14:26:50 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = vnode->fid;
|
|
|
|
__entry->from = fl->fl_start;
|
|
|
|
__entry->len = fl->fl_end - fl->fl_start + 1;
|
|
|
|
__entry->op = op;
|
2024-01-31 18:02:16 -05:00
|
|
|
__entry->type = fl->c.flc_type;
|
|
|
|
__entry->flags = fl->c.flc_flags;
|
2019-04-25 14:26:50 +01:00
|
|
|
__entry->debug_id = fl->fl_u.afs.debug_id;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx:%llx:%x %04x %s t=%s R=%llx/%llx f=%x",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->debug_id,
|
|
|
|
__print_symbolic(__entry->op, afs_flock_operations),
|
|
|
|
__print_symbolic(__entry->type, afs_flock_types),
|
|
|
|
__entry->from, __entry->len, __entry->flags)
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:51 +01:00
|
|
|
TRACE_EVENT(afs_reload_dir,
|
|
|
|
TP_PROTO(struct afs_vnode *vnode),
|
|
|
|
|
|
|
|
TP_ARGS(vnode),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
2019-04-25 14:26:51 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = vnode->fid;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx:%llx:%x",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique)
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:51 +01:00
|
|
|
TRACE_EVENT(afs_silly_rename,
|
|
|
|
TP_PROTO(struct afs_vnode *vnode, bool done),
|
|
|
|
|
|
|
|
TP_ARGS(vnode, done),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(bool, done)
|
2019-04-25 14:26:51 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = vnode->fid;
|
|
|
|
__entry->done = done;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx:%llx:%x done=%u",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->done)
|
|
|
|
);
|
|
|
|
|
2019-04-25 14:26:51 +01:00
|
|
|
TRACE_EVENT(afs_get_tree,
|
|
|
|
TP_PROTO(struct afs_cell *cell, struct afs_volume *volume),
|
|
|
|
|
|
|
|
TP_ARGS(cell, volume),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(u64, vid)
|
|
|
|
__array(char, cell, 24)
|
|
|
|
__array(char, volume, 24)
|
2019-04-25 14:26:51 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
int __len;
|
|
|
|
__entry->vid = volume->vid;
|
|
|
|
__len = min_t(int, cell->name_len, 23);
|
|
|
|
memcpy(__entry->cell, cell->name, __len);
|
|
|
|
__entry->cell[__len] = 0;
|
|
|
|
__len = min_t(int, volume->name_len, 23);
|
|
|
|
memcpy(__entry->volume, volume->name, __len);
|
|
|
|
__entry->volume[__len] = 0;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("--- MOUNT %s:%s %llx",
|
|
|
|
__entry->cell, __entry->volume, __entry->vid)
|
|
|
|
);
|
|
|
|
|
afs: Parse the VolSync record in the reply of a number of RPC ops
A number of fileserver RPC operations return a VolSync record as part of
their reply that gives some information about the state of the volume being
accessed, including:
(1) A volume Creation timestamp. For an RW volume, this is the time at
which the volume was created; if it changes, the RW volume was
presumably restored from a backup and all cached data should be
scrubbed as Data Version numbers could regress on the files in the
volume.
For an RO volume, this is the time it was last snapshotted from the RW
volume. It is expected to advance each time this happens; if it
regresses, cached data should be scrubbed.
(2) A volume Update timestamp (Auristor only). For an RW volume, this is
updated any time any change is made to a volume or its contents. If
it regresses, all cached data must be scrubbed.
For an RO volume, this is a copy of the RW volume's Update timestamp
at the point of snapshotting. It can be used as a version number when
checking to see if a callback on a RO volume was due to a snapshot.
If it regresses, all cached data must be scrubbed.
but this is currently not made use of by the in-kernel afs filesystem.
Make the afs filesystem use this by:
(1) Add an update time field to the afs_volsync struct and use a value of
TIME64_MIN in both that and the creation time to indicate that they
are unset.
(2) Add creation and update time fields to the afs_volume struct and use
this to track the two timestamps.
(3) Add a volsync_lock mutex to the afs_volume struct to control
modification access for when we detect a change in these values.
(3) Add a 'pre-op volsync' struct to the afs_operation struct to record
the state of the volume tracking before the op.
(4) Add a new counter, cb_scrub, to the afs_volume struct to count events
that require all data to be scrubbed. A copy is placed in the
afs_vnode struct (inode) and if they no longer match, a scrub takes
place.
(5) When the result of an operation is being parsed, parse the VolSync
data too, if it is provided. Note that the two timestamps are handled
separately, since they don't work in quite the same way.
- If the afs_volume tracking is unset, just set it and do nothing
else.
- If the result timestamps are the same as the ones in afs_volume, do
nothing.
- If the timestamps regress, increment cb_scrub if not already done
so.
- If the creation timestamp on a RW volume changes, increment cb_scrub
if not already done so.
- If the creation timestamp on a RO volume advances, update the server
list and see if the current server has been excluded, if so reissue
the op. Once over half of the replication sites have been updated,
increment cb_ro_snapshot to indicate updates may be required and
switch over to excluding unupdated replication sites.
- If the creation timestamp on a Backup volume advances, just
increment cb_ro_snapshot to trigger updates.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2023-11-05 16:11:07 +00:00
|
|
|
TRACE_EVENT(afs_cb_v_break,
|
|
|
|
TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
|
|
|
|
enum afs_cb_break_reason reason),
|
|
|
|
|
|
|
|
TP_ARGS(vid, cb_v_break, reason),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(afs_volid_t, vid)
|
|
|
|
__field(unsigned int, cb_v_break)
|
|
|
|
__field(enum afs_cb_break_reason, reason)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->vid = vid;
|
|
|
|
__entry->cb_v_break = cb_v_break;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx vb=%x %s",
|
|
|
|
__entry->vid,
|
|
|
|
__entry->cb_v_break,
|
|
|
|
__print_symbolic(__entry->reason, afs_cb_break_reasons))
|
|
|
|
);
|
|
|
|
|
2019-06-20 18:12:16 +01:00
|
|
|
TRACE_EVENT(afs_cb_break,
|
|
|
|
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
|
|
|
|
enum afs_cb_break_reason reason, bool skipped),
|
|
|
|
|
|
|
|
TP_ARGS(fid, cb_break, reason, skipped),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(unsigned int, cb_break)
|
|
|
|
__field(enum afs_cb_break_reason, reason)
|
|
|
|
__field(bool, skipped)
|
2019-06-20 18:12:16 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = *fid;
|
|
|
|
__entry->cb_break = cb_break;
|
|
|
|
__entry->reason = reason;
|
|
|
|
__entry->skipped = skipped;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("%llx:%llx:%x b=%x s=%u %s",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__entry->cb_break,
|
|
|
|
__entry->skipped,
|
|
|
|
__print_symbolic(__entry->reason, afs_cb_break_reasons))
|
|
|
|
);
|
|
|
|
|
|
|
|
TRACE_EVENT(afs_cb_miss,
|
|
|
|
TP_PROTO(struct afs_fid *fid, enum afs_cb_break_reason reason),
|
|
|
|
|
|
|
|
TP_ARGS(fid, reason),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field(enum afs_cb_break_reason, reason)
|
2019-06-20 18:12:16 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->fid = *fid;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk(" %llx:%llx:%x %s",
|
|
|
|
__entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
|
|
|
|
__print_symbolic(__entry->reason, afs_cb_break_reasons))
|
|
|
|
);
|
|
|
|
|
2019-06-20 18:12:17 +01:00
|
|
|
TRACE_EVENT(afs_server,
|
2022-07-06 11:26:14 +01:00
|
|
|
TP_PROTO(unsigned int server_debug_id, int ref, int active,
|
2020-04-17 17:31:26 +01:00
|
|
|
enum afs_server_trace reason),
|
2019-06-20 18:12:17 +01:00
|
|
|
|
2022-07-06 11:26:14 +01:00
|
|
|
TP_ARGS(server_debug_id, ref, active, reason),
|
2019-06-20 18:12:17 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, server)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(int, active)
|
|
|
|
__field(int, reason)
|
2019-06-20 18:12:17 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2022-07-06 11:26:14 +01:00
|
|
|
__entry->server = server_debug_id;
|
2020-04-17 17:31:26 +01:00
|
|
|
__entry->ref = ref;
|
|
|
|
__entry->active = active;
|
2019-06-20 18:12:17 +01:00
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
2025-02-14 10:16:21 +00:00
|
|
|
TP_printk("s=%08x %s r=%d a=%d",
|
2019-06-20 18:12:17 +01:00
|
|
|
__entry->server,
|
|
|
|
__print_symbolic(__entry->reason, afs_server_traces),
|
2020-04-17 17:31:26 +01:00
|
|
|
__entry->ref,
|
|
|
|
__entry->active)
|
2019-06-20 18:12:17 +01:00
|
|
|
);
|
|
|
|
|
2020-04-29 17:02:04 +01:00
|
|
|
TRACE_EVENT(afs_volume,
|
2025-02-14 20:41:30 +00:00
|
|
|
TP_PROTO(unsigned int debug_id, afs_volid_t vid, int ref,
|
|
|
|
enum afs_volume_trace reason),
|
2020-04-29 17:02:04 +01:00
|
|
|
|
2025-02-14 20:41:30 +00:00
|
|
|
TP_ARGS(debug_id, vid, ref, reason),
|
2020-04-29 17:02:04 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2025-02-14 20:41:30 +00:00
|
|
|
__field(unsigned int, debug_id)
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(afs_volid_t, vid)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(enum afs_volume_trace, reason)
|
2020-04-29 17:02:04 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2025-02-14 20:41:30 +00:00
|
|
|
__entry->debug_id = debug_id;
|
|
|
|
__entry->vid = vid;
|
|
|
|
__entry->ref = ref;
|
|
|
|
__entry->reason = reason;
|
2020-04-29 17:02:04 +01:00
|
|
|
),
|
|
|
|
|
2025-02-14 20:41:30 +00:00
|
|
|
TP_printk("V=%08x %s vid=%llx r=%d",
|
|
|
|
__entry->debug_id,
|
2020-04-29 17:02:04 +01:00
|
|
|
__print_symbolic(__entry->reason, afs_volume_traces),
|
2025-02-14 20:41:30 +00:00
|
|
|
__entry->vid,
|
2020-04-29 17:02:04 +01:00
|
|
|
__entry->ref)
|
|
|
|
);
|
|
|
|
|
2020-10-13 20:51:59 +01:00
|
|
|
TRACE_EVENT(afs_cell,
|
2022-07-06 10:52:14 +01:00
|
|
|
TP_PROTO(unsigned int cell_debug_id, int ref, int active,
|
2020-10-13 20:51:59 +01:00
|
|
|
enum afs_cell_trace reason),
|
|
|
|
|
2022-07-06 10:52:14 +01:00
|
|
|
TP_ARGS(cell_debug_id, ref, active, reason),
|
2020-10-13 20:51:59 +01:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
2023-02-23 15:11:42 +00:00
|
|
|
__field(unsigned int, cell)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(int, active)
|
|
|
|
__field(int, reason)
|
2020-10-13 20:51:59 +01:00
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->cell = cell_debug_id;
|
2022-07-06 10:52:14 +01:00
|
|
|
__entry->ref = ref;
|
2020-10-13 20:51:59 +01:00
|
|
|
__entry->active = active;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
2022-07-06 10:52:14 +01:00
|
|
|
TP_printk("L=%08x %s r=%d a=%d",
|
2020-10-13 20:51:59 +01:00
|
|
|
__entry->cell,
|
|
|
|
__print_symbolic(__entry->reason, afs_cell_traces),
|
2022-07-06 10:52:14 +01:00
|
|
|
__entry->ref,
|
2020-10-13 20:51:59 +01:00
|
|
|
__entry->active)
|
|
|
|
);
|
|
|
|
|
2023-10-19 13:59:03 +01:00
|
|
|
TRACE_EVENT(afs_alist,
|
|
|
|
TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
|
|
|
|
|
|
|
|
TP_ARGS(alist_debug_id, ref, reason),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, alist)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(int, active)
|
|
|
|
__field(int, reason)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->alist = alist_debug_id;
|
|
|
|
__entry->ref = ref;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("AL=%08x %s r=%d",
|
|
|
|
__entry->alist,
|
|
|
|
__print_symbolic(__entry->reason, afs_alist_traces),
|
|
|
|
__entry->ref)
|
|
|
|
);
|
|
|
|
|
2023-10-31 16:30:37 +00:00
|
|
|
TRACE_EVENT(afs_estate,
|
|
|
|
TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
|
|
|
|
int ref, enum afs_estate_trace reason),
|
|
|
|
|
|
|
|
TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, server)
|
|
|
|
__field(unsigned int, estate)
|
|
|
|
__field(int, ref)
|
|
|
|
__field(int, active)
|
|
|
|
__field(int, reason)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->server = server_debug_id;
|
|
|
|
__entry->estate = estate_debug_id;
|
|
|
|
__entry->ref = ref;
|
|
|
|
__entry->reason = reason;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("ES=%08x[%x] %s r=%d",
|
|
|
|
__entry->server,
|
|
|
|
__entry->estate,
|
|
|
|
__print_symbolic(__entry->reason, afs_estate_traces),
|
|
|
|
__entry->ref)
|
|
|
|
);
|
|
|
|
|
2023-10-30 11:43:24 +00:00
|
|
|
TRACE_EVENT(afs_fs_probe,
|
2023-10-31 16:30:37 +00:00
|
|
|
TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
|
2023-10-30 11:43:24 +00:00
|
|
|
unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
|
|
|
|
|
2023-10-31 16:30:37 +00:00
|
|
|
TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
|
2023-10-30 11:43:24 +00:00
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, server)
|
2023-10-31 16:30:37 +00:00
|
|
|
__field(unsigned int, estate)
|
2023-10-30 11:43:24 +00:00
|
|
|
__field(bool, tx)
|
|
|
|
__field(u16, addr_index)
|
|
|
|
__field(short, error)
|
|
|
|
__field(s32, abort_code)
|
|
|
|
__field(unsigned int, rtt_us)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
2023-10-31 16:30:37 +00:00
|
|
|
struct afs_addr_list *alist = estate->addresses;
|
2023-10-30 11:43:24 +00:00
|
|
|
__entry->server = server->debug_id;
|
2023-10-31 16:30:37 +00:00
|
|
|
__entry->estate = estate->probe_seq;
|
2023-10-30 11:43:24 +00:00
|
|
|
__entry->tx = tx;
|
|
|
|
__entry->addr_index = addr_index;
|
|
|
|
__entry->error = error;
|
|
|
|
__entry->abort_code = abort_code;
|
|
|
|
__entry->rtt_us = rtt_us;
|
|
|
|
memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
|
|
|
|
sizeof(__entry->srx));
|
|
|
|
),
|
|
|
|
|
2023-10-31 16:30:37 +00:00
|
|
|
TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
|
|
|
|
__entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
|
|
|
|
__entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
|
2023-10-30 11:43:24 +00:00
|
|
|
&__entry->srx.transport)
|
|
|
|
);
|
|
|
|
|
2023-10-30 11:53:16 +00:00
|
|
|
TRACE_EVENT(afs_vl_probe,
|
|
|
|
TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
|
|
|
|
unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
|
|
|
|
|
|
|
|
TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, server)
|
|
|
|
__field(bool, tx)
|
|
|
|
__field(unsigned short, flags)
|
|
|
|
__field(u16, addr_index)
|
|
|
|
__field(short, error)
|
|
|
|
__field(s32, abort_code)
|
|
|
|
__field(unsigned int, rtt_us)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->server = server->debug_id;
|
|
|
|
__entry->tx = tx;
|
|
|
|
__entry->addr_index = addr_index;
|
|
|
|
__entry->error = error;
|
|
|
|
__entry->abort_code = abort_code;
|
|
|
|
__entry->rtt_us = rtt_us;
|
|
|
|
memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
|
|
|
|
sizeof(__entry->srx));
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
|
|
|
|
__entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
|
|
|
|
__entry->error, __entry->abort_code, __entry->rtt_us,
|
|
|
|
&__entry->srx.transport)
|
|
|
|
);
|
|
|
|
|
2023-10-18 09:24:01 +01:00
|
|
|
TRACE_EVENT(afs_rotate,
|
|
|
|
TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
|
|
|
|
|
|
|
|
TP_ARGS(op, reason, extra),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, op)
|
|
|
|
__field(unsigned int, flags)
|
|
|
|
__field(unsigned int, extra)
|
|
|
|
__field(unsigned short, iteration)
|
|
|
|
__field(short, server_index)
|
|
|
|
__field(short, addr_index)
|
|
|
|
__field(enum afs_rotate_trace, reason)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->op = op->debug_id;
|
|
|
|
__entry->flags = op->flags;
|
|
|
|
__entry->iteration = op->nr_iterations;
|
|
|
|
__entry->server_index = op->server_index;
|
|
|
|
__entry->addr_index = op->addr_index;
|
|
|
|
__entry->reason = reason;
|
|
|
|
__entry->extra = extra;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
|
|
|
|
__entry->op,
|
|
|
|
__entry->iteration,
|
|
|
|
__print_symbolic(__entry->reason, afs_rotate_traces),
|
|
|
|
__entry->flags,
|
|
|
|
__entry->server_index,
|
|
|
|
__entry->addr_index,
|
|
|
|
__entry->extra)
|
|
|
|
);
|
|
|
|
|
2023-11-17 09:20:28 +00:00
|
|
|
TRACE_EVENT(afs_make_call,
|
|
|
|
TP_PROTO(struct afs_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(bool, is_vl)
|
|
|
|
__field(enum afs_fs_operation, op)
|
|
|
|
__field_struct(struct afs_fid, fid)
|
|
|
|
__field_struct(struct sockaddr_rxrpc, srx)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->op = call->operation_ID;
|
|
|
|
__entry->fid = call->fid;
|
|
|
|
memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
|
|
|
|
sizeof(__entry->srx));
|
|
|
|
__entry->srx.srx_service = call->service_id;
|
|
|
|
__entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
|
|
|
|
__entry->srx.srx_service == YFS_VL_SERVICE);
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
|
|
|
|
__entry->call,
|
|
|
|
&__entry->srx.transport,
|
|
|
|
__entry->srx.srx_service,
|
|
|
|
__entry->is_vl ?
|
|
|
|
__print_symbolic(__entry->op, afs_vl_operations) :
|
|
|
|
__print_symbolic(__entry->op, afs_fs_operations),
|
|
|
|
__entry->fid.vid,
|
|
|
|
__entry->fid.vnode,
|
|
|
|
__entry->fid.unique)
|
|
|
|
);
|
|
|
|
|
2024-12-16 20:41:21 +00:00
|
|
|
TRACE_EVENT(afs_read_recv,
|
|
|
|
TP_PROTO(const struct afs_operation *op, const struct afs_call *call),
|
|
|
|
|
|
|
|
TP_ARGS(op, call),
|
|
|
|
|
|
|
|
TP_STRUCT__entry(
|
|
|
|
__field(unsigned int, rreq)
|
|
|
|
__field(unsigned int, sreq)
|
|
|
|
__field(unsigned int, op)
|
|
|
|
__field(unsigned int, op_flags)
|
|
|
|
__field(unsigned int, call)
|
|
|
|
__field(enum afs_call_state, call_state)
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_fast_assign(
|
|
|
|
__entry->op = op->debug_id;
|
|
|
|
__entry->sreq = op->fetch.subreq->debug_index;
|
|
|
|
__entry->rreq = op->fetch.subreq->rreq->debug_id;
|
|
|
|
__entry->op_flags = op->flags;
|
|
|
|
__entry->call = call->debug_id;
|
|
|
|
__entry->call_state = call->state;
|
|
|
|
),
|
|
|
|
|
|
|
|
TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x",
|
|
|
|
__entry->rreq, __entry->sreq,
|
|
|
|
__entry->op,
|
|
|
|
__entry->call, __entry->call_state,
|
|
|
|
__entry->op_flags)
|
|
|
|
);
|
|
|
|
|
2017-01-05 10:38:34 +00:00
|
|
|
#endif /* _TRACE_AFS_H */
|
|
|
|
|
|
|
|
/* This part must be outside protection */
|
|
|
|
#include <trace/define_trace.h>
|