// SPDX-License-Identifier: GPL-2.0-or-later
/* Handle vlserver selection and rotation.
 *
 * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include "internal.h"
#include "afs_vl.h"
|
|
|
|
/*
|
|
|
|
* Begin an operation on a volume location server.
|
|
|
|
*/
|
|
|
|
bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell,
|
|
|
|
struct key *key)
|
|
|
|
{
|
2023-10-20 16:13:03 +01:00
|
|
|
static atomic_t debug_ids;
|
|
|
|
|
2018-10-20 00:57:57 +01:00
|
|
|
memset(vc, 0, sizeof(*vc));
|
|
|
|
vc->cell = cell;
|
|
|
|
vc->key = key;
|
2023-10-25 17:53:33 +01:00
|
|
|
vc->cumul_error.error = -EDESTADDRREQ;
|
|
|
|
vc->nr_iterations = -1;
|
2018-10-20 00:57:57 +01:00
|
|
|
|
|
|
|
if (signal_pending(current)) {
|
2023-10-25 17:53:33 +01:00
|
|
|
vc->cumul_error.error = -EINTR;
|
2018-10-20 00:57:57 +01:00
|
|
|
vc->flags |= AFS_VL_CURSOR_STOP;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2023-10-20 16:13:03 +01:00
|
|
|
vc->debug_id = atomic_inc_return(&debug_ids);
|
2018-10-20 00:57:57 +01:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Begin iteration through a server list, starting with the last used server if
 * possible, or the last recorded good server if not.
 *
 * Returns true with vc->server_list holding a ref on the cell's VL server
 * list and vc->untried_servers populated; returns false (with
 * vc->cumul_error.error set where a reason is known) if no servers are
 * available or the DNS wait was interrupted.
 */
static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
{
	struct afs_cell *cell = vc->cell;
	unsigned int dns_lookup_count;

	/* If we have no DNS record for the cell, or the record has expired,
	 * kick off a fresh lookup on the cell's work item.
	 */
	if (cell->dns_source == DNS_RECORD_UNAVAILABLE ||
	    cell->dns_expiry <= ktime_get_real_seconds()) {
		/* Sample the counter before queuing so we can detect the
		 * lookup completing (the counter is bumped with a release).
		 */
		dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
		set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
		afs_queue_cell(cell, afs_cell_trace_queue_dns);

		/* No record at all yet: we can't proceed until the queued
		 * lookup finishes, so wait for the counter to move.
		 */
		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
			if (wait_var_event_interruptible(
				    &cell->dns_lookup_count,
				    smp_load_acquire(&cell->dns_lookup_count)
				    != dns_lookup_count) < 0) {
				vc->cumul_error.error = -ERESTARTSYS;
				return false;
			}
		}

		/* Status load is ordered after lookup counter load */
		if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
			pr_warn("No record of cell %s\n", cell->name);
			vc->cumul_error.error = -ENOENT;
			return false;
		}

		/* Lookup ran but still produced nothing usable. */
		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
			vc->cumul_error.error = -EDESTADDRREQ;
			return false;
		}
	}

	/* Pin the cell's current VL server list for the iteration. */
	read_lock(&cell->vl_servers_lock);
	vc->server_list = afs_get_vlserverlist(
		rcu_dereference_protected(cell->vl_servers,
					  lockdep_is_held(&cell->vl_servers_lock)));
	read_unlock(&cell->vl_servers_lock);
	if (!vc->server_list->nr_servers)
		return false;

	/* One bit per server: all servers start out untried. */
	vc->untried_servers = (1UL << vc->server_list->nr_servers) - 1;
	vc->server_index = -1;
	return true;
}
|
|
|
|
|
|
|
|
/*
 * Select the vlserver to use.  May be called multiple times to rotate
 * through the vlservers.
 *
 * Returns true with vc->server/vc->alist/vc->addr_index set to the next
 * server address to try, or false when rotation is complete (success, hard
 * failure or server list exhausted) with the outcome accumulated in
 * vc->cumul_error.  On each call after the first, the result of the previous
 * call attempt (vc->call_error / vc->call_abort_code) is evaluated to decide
 * whether to stop, retry another address, or move to another server.
 */
bool afs_select_vlserver(struct afs_vl_cursor *vc)
{
	struct afs_addr_list *alist = vc->alist;
	struct afs_vlserver *vlserver;
	unsigned long set, failed;
	unsigned int rtt;
	s32 abort_code = vc->call_abort_code;
	int error = vc->call_error, i;

	vc->nr_iterations++;

	_enter("VC=%x+%x,%d{%lx},%d{%lx},%d,%d",
	       vc->debug_id, vc->nr_iterations, vc->server_index, vc->untried_servers,
	       vc->addr_index, vc->addr_tried,
	       error, abort_code);

	if (vc->flags & AFS_VL_CURSOR_STOP) {
		_leave(" = f [stopped]");
		return false;
	}

	/* First call: nothing to evaluate yet, go straight to selection. */
	if (vc->nr_iterations == 0)
		goto start;

	/* Record the outcome against the address we just used. */
	WRITE_ONCE(alist->addrs[vc->addr_index].last_error, error);

	/* Evaluate the result of the previous operation, if there was one. */
	switch (error) {
	default:
	case 0:
		/* Success or local failure.  Stop. */
		vc->cumul_error.error = error;
		vc->flags |= AFS_VL_CURSOR_STOP;
		_leave(" = f [okay/local %d]", vc->cumul_error.error);
		return false;

	case -ECONNABORTED:
		/* The far side rejected the operation on some grounds.  This
		 * might involve the server being busy or the volume having been moved.
		 */
		switch (abort_code) {
		case AFSVL_IO:
		case AFSVL_BADVOLOPER:
		case AFSVL_NOMEM:
			/* The server went weird. */
			afs_prioritise_error(&vc->cumul_error, -EREMOTEIO, abort_code);
			//write_lock(&vc->cell->vl_servers_lock);
			//vc->server_list->weird_mask |= 1 << vc->server_index;
			//write_unlock(&vc->cell->vl_servers_lock);
			goto next_server;

		default:
			afs_prioritise_error(&vc->cumul_error, error, abort_code);
			goto failed;
		}

	case -ERFKILL:
	case -EADDRNOTAVAIL:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EHOSTDOWN:
	case -ECONNREFUSED:
	case -ETIMEDOUT:
	case -ETIME:
		/* Couldn't reach that address - try another one on the same
		 * server before giving up on it.
		 */
		_debug("no conn %d", error);
		afs_prioritise_error(&vc->cumul_error, error, 0);
		goto iterate_address;

	case -ECONNRESET:
		/* Call was reset; note that a full-list retry may be worth it. */
		_debug("call reset");
		afs_prioritise_error(&vc->cumul_error, error, 0);
		vc->flags |= AFS_VL_CURSOR_RETRY;
		goto next_server;

	case -EOPNOTSUPP:
		_debug("notsupp");
		goto next_server;
	}

restart_from_beginning:
	/* Drop the current address and server lists and start the whole
	 * rotation over (at most once - see AFS_VL_CURSOR_RETRIED).
	 */
	_debug("restart");
	if (vc->call_responded &&
	    vc->addr_index != vc->alist->preferred &&
	    test_bit(alist->preferred, &vc->addr_tried))
		WRITE_ONCE(alist->preferred, vc->addr_index);
	afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_restart);
	alist = vc->alist = NULL;

	afs_put_vlserverlist(vc->cell->net, vc->server_list);
	vc->server_list = NULL;
	if (vc->flags & AFS_VL_CURSOR_RETRIED)
		goto failed;
	vc->flags |= AFS_VL_CURSOR_RETRIED;

start:
	_debug("start");
	ASSERTCMP(alist, ==, NULL);

	if (!afs_start_vl_iteration(vc))
		goto failed;

	/* Fire off capability probes so we can pick a responsive server. */
	error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list);
	if (error < 0) {
		afs_prioritise_error(&vc->cumul_error, error, 0);
		goto failed;
	}

pick_server:
	_debug("pick [%lx]", vc->untried_servers);
	ASSERTCMP(alist, ==, NULL);

	error = afs_wait_for_vl_probes(vc->server_list, vc->untried_servers);
	if (error < 0) {
		afs_prioritise_error(&vc->cumul_error, error, 0);
		goto failed;
	}

	/* Pick the untried server with the lowest RTT. */
	vc->server_index = vc->server_list->preferred;
	if (test_bit(vc->server_index, &vc->untried_servers))
		goto selected_server;

	vc->server_index = -1;
	rtt = UINT_MAX;
	for (i = 0; i < vc->server_list->nr_servers; i++) {
		struct afs_vlserver *s = vc->server_list->servers[i].server;

		/* Only consider servers we haven't tried that responded to
		 * their probe.
		 */
		if (!test_bit(i, &vc->untried_servers) ||
		    !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
			continue;
		if (s->probe.rtt <= rtt) {
			vc->server_index = i;
			rtt = s->probe.rtt;
		}
	}

	if (vc->server_index == -1)
		goto no_more_servers;

selected_server:
	_debug("use %d", vc->server_index);
	__clear_bit(vc->server_index, &vc->untried_servers);

	/* We're starting on a different vlserver from the list.  We need to
	 * check it, find its address list and probe its capabilities before we
	 * use it.
	 */
	vlserver = vc->server_list->servers[vc->server_index].server;
	vc->server = vlserver;

	_debug("USING VLSERVER: %s", vlserver->name);

	/* Pin the server's address list for the address iteration. */
	read_lock(&vlserver->lock);
	alist = rcu_dereference_protected(vlserver->addresses,
					  lockdep_is_held(&vlserver->lock));
	vc->alist = afs_get_addrlist(alist, afs_alist_trace_get_vlrotate_set);
	read_unlock(&vlserver->lock);

	vc->addr_tried = 0;
	vc->addr_index = -1;

iterate_address:
	/* Iterate over the current server's address list to try and find an
	 * address on which it will respond to us.
	 */
	set = READ_ONCE(alist->responded);
	failed = READ_ONCE(alist->probe_failed);
	vc->addr_index = READ_ONCE(alist->preferred);

	_debug("%lx-%lx-%lx,%d", set, failed, vc->addr_tried, vc->addr_index);

	/* Candidates = responded addresses minus failures and ones we've
	 * already tried this rotation.
	 */
	set &= ~(failed | vc->addr_tried);

	if (!set)
		goto next_server;

	/* Use the preferred address if it's still a candidate, else the
	 * lowest-numbered one.
	 */
	if (!test_bit(vc->addr_index, &set))
		vc->addr_index = __ffs(set);

	set_bit(vc->addr_index, &vc->addr_tried);
	vc->alist = alist;

	_debug("VL address %d/%d", vc->addr_index, alist->nr_addrs);

	vc->call_responded = false;
	_leave(" = t %pISpc", rxrpc_kernel_remote_addr(alist->addrs[vc->addr_index].peer));
	return true;

next_server:
	/* This server is exhausted; record a better preferred address if one
	 * responded, drop the address list and pick another server.
	 */
	_debug("next");
	ASSERT(alist);
	if (vc->call_responded &&
	    vc->addr_index != alist->preferred &&
	    test_bit(alist->preferred, &vc->addr_tried))
		WRITE_ONCE(alist->preferred, vc->addr_index);
	afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_next);
	alist = vc->alist = NULL;
	goto pick_server;

no_more_servers:
	/* That's all the servers poked to no good effect.  Try again if some
	 * of them were busy.
	 */
	if (vc->flags & AFS_VL_CURSOR_RETRY)
		goto restart_from_beginning;

	/* Fold each server's probe outcome into the cumulative error. */
	for (i = 0; i < vc->server_list->nr_servers; i++) {
		struct afs_vlserver *s = vc->server_list->servers[i].server;

		if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
			vc->cumul_error.responded = true;
		afs_prioritise_error(&vc->cumul_error, READ_ONCE(s->probe.error),
				     s->probe.abort_code);
	}

failed:
	/* Terminal failure: release any pinned address list and stop the
	 * cursor so further calls return false immediately.
	 */
	if (alist) {
		if (vc->call_responded &&
		    vc->addr_index != alist->preferred &&
		    test_bit(alist->preferred, &vc->addr_tried))
			WRITE_ONCE(alist->preferred, vc->addr_index);
		afs_put_addrlist(alist, afs_alist_trace_put_vlrotate_fail);
		alist = vc->alist = NULL;
	}
	vc->flags |= AFS_VL_CURSOR_STOP;
	_leave(" = f [failed %d]", vc->cumul_error.error);
	return false;
}
|
|
|
|
|
2018-10-20 00:57:58 +01:00
|
|
|
/*
 * Dump cursor state in the case of the error being EDESTADDRREQ.
 *
 * Emits the cell's DNS state, the cursor's rotation state and each server's
 * probe/address state to the kernel log.  Rate-limited to the first few
 * occurrences and compiled out unless CONFIG_AFS_DEBUG_CURSOR is enabled.
 */
static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
{
	struct afs_cell *cell = vc->cell;
	static int count;	/* Limits the dump to the first few failures */
	int i;

	if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3)
		return;
	count++;

	/* RCU protects the per-server address lists dereferenced below. */
	rcu_read_lock();
	pr_notice("EDESTADDR occurred\n");
	pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
	pr_notice("DNS: src=%u st=%u lc=%x\n",
		  cell->dns_source, cell->dns_status, cell->dns_lookup_count);
	pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
		  vc->untried_servers, vc->server_index, vc->nr_iterations,
		  vc->flags, vc->cumul_error.error);
	pr_notice("VC: call er=%d ac=%d r=%u\n",
		  vc->call_error, vc->call_abort_code, vc->call_responded);

	if (vc->server_list) {
		const struct afs_vlserver_list *sl = vc->server_list;
		pr_notice("VC: SL nr=%u ix=%u\n",
			  sl->nr_servers, sl->index);
		for (i = 0; i < sl->nr_servers; i++) {
			const struct afs_vlserver *s = sl->servers[i].server;
			pr_notice("VC: server %s+%hu fl=%lx E=%hd\n",
				  s->name, s->port, s->flags, s->probe.error);
			if (s->addresses) {
				const struct afs_addr_list *a =
					rcu_dereference(s->addresses);
				pr_notice("VC: - nr=%u/%u/%u pf=%u\n",
					  a->nr_ipv4, a->nr_addrs, a->max_addrs,
					  a->preferred);
				pr_notice("VC: - R=%lx F=%lx\n",
					  a->responded, a->probe_failed);
				/* Flag the list the cursor is holding. */
				if (a == vc->alist)
					pr_notice("VC: - current\n");
			}
		}
	}

	pr_notice("AC: t=%lx ax=%u\n", vc->addr_tried, vc->addr_index);
	rcu_read_unlock();
}
|
|
|
|
|
2018-10-20 00:57:57 +01:00
|
|
|
/*
|
|
|
|
* Tidy up a volume location server cursor and unlock the vnode.
|
|
|
|
*/
|
|
|
|
int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
|
|
|
|
{
|
|
|
|
struct afs_net *net = vc->cell->net;
|
|
|
|
|
2023-10-20 16:13:03 +01:00
|
|
|
_enter("VC=%x+%x", vc->debug_id, vc->nr_iterations);
|
|
|
|
|
2023-10-25 17:53:33 +01:00
|
|
|
switch (vc->cumul_error.error) {
|
|
|
|
case -EDESTADDRREQ:
|
|
|
|
case -EADDRNOTAVAIL:
|
|
|
|
case -ENETUNREACH:
|
|
|
|
case -EHOSTUNREACH:
|
2018-10-20 00:57:58 +01:00
|
|
|
afs_vl_dump_edestaddrreq(vc);
|
2023-10-25 17:53:33 +01:00
|
|
|
break;
|
|
|
|
}
|
2018-10-20 00:57:58 +01:00
|
|
|
|
2023-10-20 16:13:03 +01:00
|
|
|
if (vc->alist) {
|
|
|
|
if (vc->call_responded &&
|
|
|
|
vc->addr_index != vc->alist->preferred &&
|
|
|
|
test_bit(vc->alist->preferred, &vc->addr_tried))
|
|
|
|
WRITE_ONCE(vc->alist->preferred, vc->addr_index);
|
|
|
|
afs_put_addrlist(vc->alist, afs_alist_trace_put_vlrotate_end);
|
|
|
|
vc->alist = NULL;
|
|
|
|
}
|
2018-10-20 00:57:57 +01:00
|
|
|
afs_put_vlserverlist(net, vc->server_list);
|
2023-10-25 17:53:33 +01:00
|
|
|
return vc->cumul_error.error;
|
2018-10-20 00:57:57 +01:00
|
|
|
}
|