// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int mask;
	u64 wwnn;
	u64 wwpn;
	u32 roles;
	u32 fcaddr;
	u64 lpwwnn;
	u64 lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s" },
	{ NVMF_OPT_WWPN,	"wwpn=%s" },
	{ NVMF_OPT_ROLES,	"roles=%d" },
	{ NVMF_OPT_FCADDR,	"fcaddr=%x" },
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s" },
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s" },
	{ NVMF_OPT_ERR,		NULL }
};

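/*
 * A wwnn/wwpn/lpwwnn/lpwwpn value must be given as "0x" followed by
 * exactly NVME_FC_TRADDR_HEXNAMELEN hex digits; anything else is
 * rejected before the numeric conversion is attempted.
 */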
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

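/*
 * Parse a comma-separated option string (wwnn, wwpn, roles, fcaddr,
 * lpwwnn, lpwwpn) into a fcloop_ctrl_options structure. Unknown
 * keywords or malformed values fail the whole parse with -EINVAL.
 */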
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

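/*
 * Parse only the wwnn/wwpn pair from an option string. Both values
 * must be present; otherwise -EINVAL is returned.
 */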
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

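/*
 * Option masks: a local port needs wwnn/wwpn, a remote port additionally
 * needs the lpwwnn/lpwwpn of the local port it pairs with, and a target
 * port again needs only wwnn/wwpn.
 */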
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

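/*
 * Object model: an fcloop_lport wraps an nvme_fc_local_port, an
 * fcloop_nport describes one emulated FC port and points at the
 * rport/tport halves built on it, and fcloop_rport/fcloop_tport pair
 * the host-side remote port with the target port (each keeping a work
 * item and list used to complete LS requests asynchronously).
 */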
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	refcount_t ref;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

/* The port is already being removed, avoid double free */
#define PORT_DELETED	0

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
	spinlock_t lock;
	struct list_head ls_list;
	struct work_struct ls_work;
	unsigned long flags;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
	spinlock_t lock;
	struct list_head ls_list;
	struct work_struct ls_work;
	unsigned long flags;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	refcount_t ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

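/*
 * Tracks a single LS exchange as it loops between the two sides: the
 * originator's nvmefc_ls_req, the nvmefc_ls_rsp handed to the receiving
 * side, the direction (H2T or T2H), the completion status, and the list
 * linkage used to defer the ->done callback to the ls_work handler.
 */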
struct fcloop_lsreq {
	struct nvmefc_ls_req *lsreq;
	struct nvmefc_ls_rsp ls_rsp;
	int lsdir;	/* H2T or T2H */
	int status;
	struct list_head ls_list;	/* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport *tport;
	struct work_struct work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport *tport;
	struct nvmefc_fcp_req *fcpreq;
	spinlock_t reqlock;
	u16 status;
	u32 inistate;
	bool active;
	bool aborted;
	refcount_t ref;
	struct work_struct fcp_rcv_work;
	struct work_struct abort_rcv_work;
	struct work_struct tio_done_work;
	struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req *fcpreq;
	struct fcloop_fcpreq *tfcp_req;
	spinlock_t inilock;
};

/* SLAB cache for fcloop_lsreq structures */
static struct kmem_cache *lsreq_cache;

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

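/*
 * Work handler that drains rport->ls_list and invokes each queued LS
 * request's ->done callback outside of rport->lock; the fcloop_lsreq is
 * freed back to the slab cache once the callback returns.
 */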
static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */
		kmem_cache_free(lsreq_cache, tls_req);

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

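/*
 * Host-to-target LS request: hand the host's LS payload straight to the
 * paired nvmet_fc target port. If no target port is attached, the
 * request is queued on the rport and completed from ls_work with
 * -ECONNREFUSED.
 */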
static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_lsreq *tls_req;
	int ret = 0;

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (!remoteport) {
		kmem_cache_free(lsreq_cache, tls_req);
		return 0;
	}

	rport = remoteport->private;
	spin_lock(&rport->lock);
	list_add_tail(&tls_req->ls_list, &rport->ls_list);
	spin_unlock(&rport->lock);
	queue_work(nvmet_wq, &rport->ls_work);

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */
		kmem_cache_free(lsreq_cache, tls_req);

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

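/*
 * Target-to-host LS request: the mirror image of fcloop_h2t_ls_req.
 * The LS payload is delivered to the host side via nvme_fc_rcv_ls_req;
 * without a remote port the request is completed from tport->ls_work
 * with -ECONNREFUSED.
 */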
static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_tport *tport = targetport->private;
	struct fcloop_lsreq *tls_req;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */

	tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL);
	if (!tls_req)
		return -ENOMEM;
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	if (ret)
		kmem_cache_free(lsreq_cache, tls_req);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	if (!targetport) {
		/*
		 * The target port is gone. The target doesn't expect any
		 * response anymore and the ->done call is not valid
		 * because the resources have been freed by
		 * nvmet_fc_free_pending_reqs.
		 *
		 * We end up here from delete association exchange:
		 * nvmet_fc_xmt_disconnect_assoc sends an async request.
		 */
		kmem_cache_free(lsreq_cache, tls_req);
		return 0;
	}

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	tport = targetport->private;
	spin_lock(&tport->lock);
	list_add_tail(&tls_req->ls_list, &tport->ls_list);
	spin_unlock(&tport->lock);
	queue_work(nvmet_wq, &tport->ls_work);

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct fcloop_rport *rport = hosthandle;

	*wwnn = rport->lport->localport->node_name;
	*wwpn = rport->lport->localport->port_name;
	return 0;
}

/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

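/*
 * Reference counting for the loopback FCP request context. The context
 * is freed when the last reference is dropped; _get only succeeds while
 * at least one reference is still held.
 */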
static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	if (!refcount_dec_and_test(&tfcp_req->ref))
		return;

	kfree(tfcp_req);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return refcount_inc_not_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	if (tfcp_req)
		fcloop_tfcp_req_put(tfcp_req);
}

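/*
 * Command-drop (error injection) support: when drop_opcode is set,
 * matching commands (optionally restricted to fabrics commands) are
 * discarded starting with the drop_instance'th match and continuing
 * for drop_amount further matches, after which dropping is disarmed.
 */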
static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}

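/*
 * Work handler for a host-issued FCP command: move the request to
 * INI_IO_ACTIVE and deliver it to the target port, unless it was already
 * aborted (the abort handler completes it) or the drop-injection logic
 * swallows it.
 */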
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted)) {
		/* the abort handler will call fcloop_call_host_done */
		return;
	}

	if (unlikely(check_for_drop(tfcp_req))) {
		pr_info("%s: dropped command ********\n", __func__);
		return;
	}

	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				   &tfcp_req->tgt_fcp_req,
				   fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

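/*
 * Work handler for an initiator-requested abort: if the I/O already
 * completed, only the extra abort reference is dropped; otherwise the
 * abort is forwarded to the target port and the host is completed with
 * -ECANCELED.
 */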
static void
|
|
|
|
fcloop_fcp_abort_recv_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req =
|
|
|
|
container_of(work, struct fcloop_fcpreq, abort_rcv_work);
|
2017-11-29 16:47:33 -08:00
|
|
|
struct nvmefc_fcp_req *fcpreq;
|
|
|
|
bool completed = false;
|
2023-04-12 16:49:04 +08:00
|
|
|
unsigned long flags;
|
2017-11-29 16:47:33 -08:00
|
|
|
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-11-29 16:47:33 -08:00
|
|
|
switch (tfcp_req->inistate) {
|
|
|
|
case INI_IO_ABORTED:
|
2025-05-07 14:23:03 +02:00
|
|
|
fcpreq = tfcp_req->fcpreq;
|
|
|
|
tfcp_req->fcpreq = NULL;
|
2017-11-29 16:47:33 -08:00
|
|
|
break;
|
|
|
|
case INI_IO_COMPLETED:
|
|
|
|
completed = true;
|
|
|
|
break;
|
|
|
|
default:
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2025-05-07 14:23:01 +02:00
|
|
|
fcloop_tfcp_req_put(tfcp_req);
|
2017-11-29 16:47:33 -08:00
|
|
|
WARN_ON(1);
|
|
|
|
return;
|
|
|
|
}
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-11-29 16:47:33 -08:00
|
|
|
|
|
|
|
if (unlikely(completed)) {
|
|
|
|
/* remove reference taken in original abort downcall */
|
|
|
|
fcloop_tfcp_req_put(tfcp_req);
|
|
|
|
return;
|
|
|
|
}
|
2017-11-29 16:47:32 -08:00
|
|
|
|
|
|
|
if (tfcp_req->tport->targetport)
|
|
|
|
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
|
|
|
|
&tfcp_req->tgt_fcp_req);
|
|
|
|
|
|
|
|
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
|
2017-11-29 16:47:33 -08:00
|
|
|
/* call_host_done releases reference for abort downcall */
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-11 11:32:31 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FCP IO operation done by target completion.
|
|
|
|
* call back up initiator "done" flows.
|
2016-12-02 00:28:44 -08:00
|
|
|
*/
|
|
|
|
static void
|
|
|
|
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req =
|
2017-11-29 16:47:32 -08:00
|
|
|
container_of(work, struct fcloop_fcpreq, tio_done_work);
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-11 11:32:31 -07:00
|
|
|
struct nvmefc_fcp_req *fcpreq;
|
2023-04-12 16:49:04 +08:00
|
|
|
unsigned long flags;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-11 11:32:31 -07:00
|
|
|
fcpreq = tfcp_req->fcpreq;
|
2017-11-29 16:47:33 -08:00
|
|
|
tfcp_req->inistate = INI_IO_COMPLETED;
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
2017-11-29 16:47:32 -08:00
|
|
|
fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
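The work handler ending above follows a locking pattern fcloop uses repeatedly: snapshot the fields the completion path needs while holding reqlock, drop the lock, and only then call back into the initiator side. A minimal sketch of that pattern, using hypothetical names rather than the real fcloop structures and assuming only the standard spinlock and workqueue primitives:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_req {
	spinlock_t lock;
	struct work_struct done_work;
	void *fcpreq;			/* stands in for struct nvmefc_fcp_req * */
	int status;
	int state;
};

static void example_call_host_done(void *fcpreq, int status)
{
	/* hypothetical: would complete the initiator-side request */
}

static void example_done_work(struct work_struct *work)
{
	struct example_req *req =
		container_of(work, struct example_req, done_work);
	void *fcpreq;
	unsigned long flags;

	/* snapshot state under the lock ... */
	spin_lock_irqsave(&req->lock, flags);
	fcpreq = req->fcpreq;
	req->state = 2;			/* e.g. an INI_IO_COMPLETED-style marker */
	spin_unlock_irqrestore(&req->lock, flags);

	/* ... and invoke the completion callback only after dropping it */
	if (fcpreq)
		example_call_host_done(fcpreq, req->status);
}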
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
fcloop_fcp_req(struct nvme_fc_local_port *localport,
|
|
|
|
struct nvme_fc_remote_port *remoteport,
|
|
|
|
void *hw_queue_handle,
|
|
|
|
struct nvmefc_fcp_req *fcpreq)
|
|
|
|
{
|
|
|
|
struct fcloop_rport *rport = remoteport->private;
|
2017-04-11 11:32:30 -07:00
|
|
|
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
|
|
|
|
struct fcloop_fcpreq *tfcp_req;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2017-04-11 11:32:30 -07:00
|
|
|
if (!rport->targetport)
|
|
|
|
return -ECONNREFUSED;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2019-06-20 13:17:01 -07:00
|
|
|
tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
|
2017-04-11 11:32:30 -07:00
|
|
|
if (!tfcp_req)
|
|
|
|
return -ENOMEM;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2017-04-11 11:32:30 -07:00
|
|
|
inireq->fcpreq = fcpreq;
|
|
|
|
inireq->tfcp_req = tfcp_req;
|
2017-11-29 16:47:33 -08:00
|
|
|
spin_lock_init(&inireq->inilock);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
tfcp_req->fcpreq = fcpreq;
|
|
|
|
tfcp_req->tport = rport->targetport->private;
|
2017-11-29 16:47:33 -08:00
|
|
|
tfcp_req->inistate = INI_IO_START;
|
2017-04-11 11:32:31 -07:00
|
|
|
spin_lock_init(&tfcp_req->reqlock);
|
2017-11-29 16:47:32 -08:00
|
|
|
INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
|
|
|
|
INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
|
|
|
|
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
|
2025-04-08 17:29:04 +02:00
|
|
|
refcount_set(&tfcp_req->ref, 1);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2022-03-21 13:57:27 +02:00
|
|
|
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2017-11-29 16:47:32 -08:00
|
|
|
return 0;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
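For context, fcloop_fcp_req() above is the initiator-side FCP I/O entry point that the nvme-fc host core invokes through the LLDD port template. A rough sketch of how it would be wired up, assuming the field names from linux/nvme-fc-driver.h; the other callbacks and the sizing values shown here are placeholders, not the actual fcloop initializer:

static struct nvme_fc_port_template example_fctemplate = {
	/* other mandatory callbacks (ls_req, queue setup, ...) elided */
	.fcp_io			= fcloop_fcp_req,
	.fcp_abort		= fcloop_fcp_abort,	/* initiator-side abort handler, assumed defined elsewhere in fcloop */
	.max_hw_queues		= 1,			/* placeholder */
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};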
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
|
|
|
|
struct scatterlist *io_sg, u32 offset, u32 length)
|
|
|
|
{
|
|
|
|
void *data_p, *io_p;
|
|
|
|
u32 data_len, io_len, tlen;
|
|
|
|
|
|
|
|
io_p = sg_virt(io_sg);
|
|
|
|
io_len = io_sg->length;
|
|
|
|
|
|
|
|
for ( ; offset; ) {
|
|
|
|
tlen = min_t(u32, offset, io_len);
|
|
|
|
offset -= tlen;
|
|
|
|
io_len -= tlen;
|
|
|
|
if (!io_len) {
|
|
|
|
io_sg = sg_next(io_sg);
|
|
|
|
io_p = sg_virt(io_sg);
|
|
|
|
io_len = io_sg->length;
|
|
|
|
} else
|
|
|
|
io_p += tlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
data_p = sg_virt(data_sg);
|
|
|
|
data_len = data_sg->length;
|
|
|
|
|
|
|
|
for ( ; length; ) {
|
|
|
|
tlen = min_t(u32, io_len, data_len);
|
|
|
|
tlen = min_t(u32, tlen, length);
|
|
|
|
|
|
|
|
if (op == NVMET_FCOP_WRITEDATA)
|
|
|
|
memcpy(data_p, io_p, tlen);
|
|
|
|
else
|
|
|
|
memcpy(io_p, data_p, tlen);
|
|
|
|
|
|
|
|
length -= tlen;
|
|
|
|
|
|
|
|
io_len -= tlen;
|
|
|
|
if ((!io_len) && (length)) {
|
|
|
|
io_sg = sg_next(io_sg);
|
|
|
|
io_p = sg_virt(io_sg);
|
|
|
|
io_len = io_sg->length;
|
|
|
|
} else
|
|
|
|
io_p += tlen;
|
|
|
|
|
|
|
|
data_len -= tlen;
|
|
|
|
if ((!data_len) && (length)) {
|
|
|
|
data_sg = sg_next(data_sg);
|
|
|
|
data_p = sg_virt(data_sg);
|
|
|
|
data_len = data_sg->length;
|
|
|
|
} else
|
|
|
|
data_p += tlen;
|
|
|
|
}
|
|
|
|
}
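fcloop_fcp_copy_data() above first walks the initiator scatterlist past 'offset' bytes, then copies 'length' bytes between the two lists, with each step bounded by the smaller of the current initiator segment, the current target segment, and the bytes still remaining. As a clarification only, here is the same two-cursor walk sketched over plain buffer segments in standalone C (hypothetical types, no scatterlist API), not a replacement for the kernel code:

#include <stddef.h>
#include <string.h>

struct seg { void *p; size_t len; };	/* stand-in for one scatterlist entry */

/*
 * Copy 'length' bytes between two segment arrays, skipping 'offset'
 * bytes of 'io' first -- the same walk fcloop_fcp_copy_data() performs
 * over scatterlists.  'write' mirrors NVMET_FCOP_WRITEDATA: copy from
 * the initiator (io) buffers into the target (data) buffers.  The
 * bounds checks on ndata/nio are added here; the kernel code trusts
 * the caller to stay within the lists.
 */
void copy_segments(int write, struct seg *data, size_t ndata,
		   struct seg *io, size_t nio, size_t offset, size_t length)
{
	size_t di = 0, ii = 0, dpos = 0, ipos = 0;

	/* advance the io-side cursor past 'offset' bytes */
	while (offset && ii < nio) {
		size_t t = offset < io[ii].len ? offset : io[ii].len;

		offset -= t;
		ipos = t;
		if (ipos == io[ii].len) {
			ii++;
			ipos = 0;
		}
	}

	while (length && ii < nio && di < ndata) {
		size_t t = io[ii].len - ipos;

		if (t > data[di].len - dpos)
			t = data[di].len - dpos;
		if (t > length)
			t = length;

		if (write)
			memcpy((char *)data[di].p + dpos, (char *)io[ii].p + ipos, t);
		else
			memcpy((char *)io[ii].p + ipos, (char *)data[di].p + dpos, t);

		length -= t;
		ipos += t;
		dpos += t;
		if (ipos == io[ii].len) { ii++; ipos = 0; }
		if (dpos == data[di].len) { di++; dpos = 0; }
	}
}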
|
|
|
|
|
|
|
|
static int
|
|
|
|
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
|
2017-04-11 11:32:31 -07:00
|
|
|
struct nvmefc_fcp_req *fcpreq;
|
2016-12-02 00:28:44 -08:00
|
|
|
u32 rsplen = 0, xfrlen = 0;
|
2017-04-11 11:32:31 -07:00
|
|
|
int fcp_err = 0, active, aborted;
|
2016-12-02 00:28:44 -08:00
|
|
|
u8 op = tgt_fcpreq->op;
|
2023-04-12 16:49:04 +08:00
|
|
|
unsigned long flags;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
fcpreq = tfcp_req->fcpreq;
|
|
|
|
active = tfcp_req->active;
|
|
|
|
aborted = tfcp_req->aborted;
|
|
|
|
tfcp_req->active = true;
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
|
|
|
if (unlikely(active))
|
|
|
|
/* illegal - call while i/o active */
|
|
|
|
return -EALREADY;
|
|
|
|
|
|
|
|
if (unlikely(aborted)) {
|
|
|
|
/* target transport has aborted i/o prior */
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
tfcp_req->active = false;
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
tgt_fcpreq->transferred_length = 0;
|
|
|
|
tgt_fcpreq->fcp_error = -ECANCELED;
|
|
|
|
tgt_fcpreq->done(tgt_fcpreq);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if fcpreq is NULL, the I/O has been aborted (from
|
|
|
|
* initiator side). For the target side, act as if all is well
|
|
|
|
* but don't actually move data.
|
|
|
|
*/
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
switch (op) {
|
|
|
|
case NVMET_FCOP_WRITEDATA:
|
|
|
|
xfrlen = tgt_fcpreq->transfer_length;
|
2017-04-11 11:32:31 -07:00
|
|
|
if (fcpreq) {
|
|
|
|
fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
|
|
|
|
fcpreq->first_sgl, tgt_fcpreq->offset,
|
|
|
|
xfrlen);
|
|
|
|
fcpreq->transferred_length += xfrlen;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case NVMET_FCOP_READDATA:
|
|
|
|
case NVMET_FCOP_READDATA_RSP:
|
|
|
|
xfrlen = tgt_fcpreq->transfer_length;
|
2017-04-11 11:32:31 -07:00
|
|
|
if (fcpreq) {
|
|
|
|
fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
|
|
|
|
fcpreq->first_sgl, tgt_fcpreq->offset,
|
|
|
|
xfrlen);
|
|
|
|
fcpreq->transferred_length += xfrlen;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
if (op == NVMET_FCOP_READDATA)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Fall-Thru to RSP handling */
|
2020-08-23 17:36:59 -05:00
|
|
|
fallthrough;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
case NVMET_FCOP_RSP:
|
2017-04-11 11:32:31 -07:00
|
|
|
if (fcpreq) {
|
|
|
|
rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
|
|
|
|
fcpreq->rsplen : tgt_fcpreq->rsplen);
|
|
|
|
memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
|
|
|
|
if (rsplen < tgt_fcpreq->rsplen)
|
|
|
|
fcp_err = -E2BIG;
|
|
|
|
fcpreq->rcv_rsplen = rsplen;
|
|
|
|
fcpreq->status = 0;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
tfcp_req->status = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
fcp_err = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
tfcp_req->active = false;
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
tgt_fcpreq->transferred_length = xfrlen;
|
|
|
|
tgt_fcpreq->fcp_error = fcp_err;
|
|
|
|
tgt_fcpreq->done(tgt_fcpreq);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
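fcloop_fcp_op() above and fcloop_tgt_fcp_abort() below are the two target-side downcalls the commit message describes: the nvmet-fc transport posts data and response operations through ->fcp_op, and requests termination of an outstanding io through the new ->fcp_abort. A rough sketch of how fcloop would be expected to publish them in its nvmet_fc_target_template; the remaining fields are elided and the values shown are placeholders, not the real fcloop initializer:

static struct nvmet_fc_target_template example_tgttemplate = {
	/* ls response, queue and request-release callbacks elided */
	.fcp_op		= fcloop_fcp_op,
	.fcp_abort	= fcloop_tgt_fcp_abort,
	.max_hw_queues	= 1,				/* placeholder */
	.target_priv_sz	= sizeof(struct fcloop_tport),	/* assumed; see the real fcloop.c */
};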
|
|
|
|
|
2017-04-11 11:32:31 -07:00
|
|
|
static void
|
|
|
|
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
|
2023-04-12 16:49:04 +08:00
|
|
|
unsigned long flags;
|
2017-04-11 11:32:31 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* mark aborted only in case there were 2 threads in transport
|
|
|
|
* (one doing io, other doing abort) and only kills ops posted
|
|
|
|
* after the abort request
|
|
|
|
*/
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
tfcp_req->aborted = true;
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
2017-09-07 16:27:28 -07:00
|
|
|
tfcp_req->status = NVME_SC_INTERNAL;
|
2017-04-11 11:32:31 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* nothing more to do. If io wasn't active, the transport should
|
|
|
|
* immediately call the req_release. If it was active, the op
|
|
|
|
* will complete, and the lldd should call req_release.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
2017-04-11 11:32:29 -07:00
|
|
|
static void
|
|
|
|
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *tgt_fcpreq)
|
|
|
|
{
|
|
|
|
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
|
|
|
|
|
2022-03-21 13:57:27 +02:00
|
|
|
queue_work(nvmet_wq, &tfcp_req->tio_done_work);
|
2017-04-11 11:32:29 -07:00
|
|
|
}
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
static void
|
2020-03-31 09:50:00 -07:00
|
|
|
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
|
2016-12-02 00:28:44 -08:00
|
|
|
struct nvme_fc_remote_port *remoteport,
|
|
|
|
struct nvmefc_ls_req *lsreq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2020-03-31 09:50:01 -07:00
|
|
|
static void
|
|
|
|
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
|
|
|
|
void *hosthandle, struct nvmefc_ls_req *lsreq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
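/*
 * Host-side FCP abort. The loopback target request is looked up under
 * inireq->inilock and a reference is taken on it. If the io is still
 * in INI_IO_START/INI_IO_ACTIVE it is marked INI_IO_ABORTED and
 * abort_rcv_work is queued, handing the reference off to the work
 * item. If the io has already completed there is nothing to abort and
 * the reference is dropped again. If no target request can be pinned
 * (or the io is in an unexpected state), the host request is completed
 * with -ECANCELED.
 */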
static void
|
|
|
|
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
|
|
|
|
struct nvme_fc_remote_port *remoteport,
|
|
|
|
void *hw_queue_handle,
|
|
|
|
struct nvmefc_fcp_req *fcpreq)
|
|
|
|
{
|
2017-04-11 11:32:31 -07:00
|
|
|
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
|
2017-11-29 16:47:33 -08:00
|
|
|
struct fcloop_fcpreq *tfcp_req;
|
|
|
|
bool abortio = true;
|
2023-04-12 16:49:04 +08:00
|
|
|
unsigned long flags;
|
2017-11-29 16:47:33 -08:00
|
|
|
|
|
|
|
spin_lock(&inireq->inilock);
|
|
|
|
tfcp_req = inireq->tfcp_req;
|
2025-05-07 14:23:01 +02:00
|
|
|
if (tfcp_req) {
|
|
|
|
if (!fcloop_tfcp_req_get(tfcp_req))
|
|
|
|
tfcp_req = NULL;
|
|
|
|
}
|
2017-11-29 16:47:33 -08:00
|
|
|
spin_unlock(&inireq->inilock);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
2025-05-07 14:23:02 +02:00
|
|
|
if (!tfcp_req) {
|
2017-04-11 11:32:31 -07:00
|
|
|
/* abort has already been called */
|
2025-05-07 14:23:02 +02:00
|
|
|
goto out_host_done;
|
|
|
|
}
|
2017-04-11 11:32:31 -07:00
|
|
|
|
|
|
|
/* break initiator/target relationship for io */
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_lock_irqsave(&tfcp_req->reqlock, flags);
|
2017-11-29 16:47:33 -08:00
|
|
|
switch (tfcp_req->inistate) {
|
|
|
|
case INI_IO_START:
|
|
|
|
case INI_IO_ACTIVE:
|
|
|
|
tfcp_req->inistate = INI_IO_ABORTED;
|
|
|
|
break;
|
|
|
|
case INI_IO_COMPLETED:
|
|
|
|
abortio = false;
|
|
|
|
break;
|
|
|
|
default:
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-11-29 16:47:33 -08:00
|
|
|
WARN_ON(1);
|
2025-05-07 14:23:02 +02:00
|
|
|
goto out_host_done;
|
2017-11-29 16:47:33 -08:00
|
|
|
}
|
2023-04-12 16:49:04 +08:00
|
|
|
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
|
2017-04-11 11:32:31 -07:00
|
|
|
|
2017-11-29 16:47:33 -08:00
|
|
|
if (abortio)
|
|
|
|
/* leave the reference while the work item is scheduled */
|
2022-03-21 13:57:27 +02:00
|
|
|
WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
|
2017-11-29 16:47:33 -08:00
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* as the io has already had the done callback made,
|
|
|
|
* nothing more to do. So release the reference taken above
|
|
|
|
*/
|
|
|
|
fcloop_tfcp_req_put(tfcp_req);
|
|
|
|
}
|
2025-05-07 14:23:02 +02:00
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
out_host_done:
|
|
|
|
fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
2025-04-08 17:29:05 +02:00
|
|
|
static void
|
|
|
|
fcloop_lport_put(struct fcloop_lport *lport)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (!refcount_dec_and_test(&lport->ref))
|
|
|
|
return;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
list_del(&lport->lport_list);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
kfree(lport);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fcloop_lport_get(struct fcloop_lport *lport)
|
|
|
|
{
|
|
|
|
return refcount_inc_not_zero(&lport->ref);
|
|
|
|
}
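/*
 * Reference counting for the lport (and, below, the nport) objects:
 * the creator holds the initial reference, lookups done under
 * fcloop_lock take an extra one via refcount_inc_not_zero(), and the
 * final put unlinks the object from its global list before freeing
 * it. Anything obtained from a *_lookup() helper must therefore be
 * balanced with the matching *_put().
 */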
|
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
static void
|
2025-04-08 17:29:04 +02:00
|
|
|
fcloop_nport_put(struct fcloop_nport *nport)
|
2017-09-19 14:01:50 -07:00
|
|
|
{
|
2025-05-07 14:22:58 +02:00
|
|
|
unsigned long flags;
|
|
|
|
|
2025-04-08 17:29:04 +02:00
|
|
|
if (!refcount_dec_and_test(&nport->ref))
|
|
|
|
return;
|
2017-09-19 14:01:50 -07:00
|
|
|
|
2025-05-07 14:22:58 +02:00
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
list_del(&nport->nport_list);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:22:59 +02:00
|
|
|
if (nport->lport)
|
|
|
|
fcloop_lport_put(nport->lport);
|
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
kfree(nport);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fcloop_nport_get(struct fcloop_nport *nport)
|
|
|
|
{
|
2025-04-08 17:29:04 +02:00
|
|
|
return refcount_inc_not_zero(&nport->ref);
|
2017-09-19 14:01:50 -07:00
|
|
|
}
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
static void
|
|
|
|
fcloop_localport_delete(struct nvme_fc_local_port *localport)
|
|
|
|
{
|
2017-11-29 16:47:31 -08:00
|
|
|
struct fcloop_lport_priv *lport_priv = localport->private;
|
|
|
|
struct fcloop_lport *lport = lport_priv->lport;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-04-08 17:29:05 +02:00
|
|
|
fcloop_lport_put(lport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
|
|
|
|
{
|
|
|
|
struct fcloop_rport *rport = remoteport->private;
|
2025-05-07 14:23:04 +02:00
|
|
|
bool put_port = false;
|
2025-05-07 14:22:57 +02:00
|
|
|
unsigned long flags;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2020-03-18 14:41:12 -07:00
|
|
|
flush_work(&rport->ls_work);
|
2025-05-07 14:22:57 +02:00
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
2025-05-07 14:23:04 +02:00
|
|
|
if (!test_and_set_bit(PORT_DELETED, &rport->flags))
|
|
|
|
put_port = true;
|
2025-05-07 14:22:57 +02:00
|
|
|
rport->nport->rport = NULL;
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:23:04 +02:00
|
|
|
if (put_port)
|
|
|
|
fcloop_nport_put(rport->nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
|
|
|
|
{
|
|
|
|
struct fcloop_tport *tport = targetport->private;
|
2025-05-07 14:23:04 +02:00
|
|
|
bool put_port = false;
|
2025-05-07 14:22:57 +02:00
|
|
|
unsigned long flags;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2020-03-31 09:50:01 -07:00
|
|
|
flush_work(&tport->ls_work);
|
2025-05-07 14:22:57 +02:00
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
2025-05-07 14:23:04 +02:00
|
|
|
if (!test_and_set_bit(PORT_DELETED, &tport->flags))
|
|
|
|
put_port = true;
|
2025-05-07 14:22:57 +02:00
|
|
|
tport->nport->tport = NULL;
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:23:04 +02:00
|
|
|
if (put_port)
|
|
|
|
fcloop_nport_put(tport->nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
#define FCLOOP_HW_QUEUES 4
|
|
|
|
#define FCLOOP_SGL_SEGS 256
|
|
|
|
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
|
|
|
|
|
2017-04-21 10:37:25 +02:00
|
|
|
static struct nvme_fc_port_template fctemplate = {
|
2016-12-02 00:28:44 -08:00
|
|
|
.localport_delete = fcloop_localport_delete,
|
|
|
|
.remoteport_delete = fcloop_remoteport_delete,
|
|
|
|
.create_queue = fcloop_create_queue,
|
|
|
|
.delete_queue = fcloop_delete_queue,
|
2020-03-31 09:50:00 -07:00
|
|
|
.ls_req = fcloop_h2t_ls_req,
|
2016-12-02 00:28:44 -08:00
|
|
|
.fcp_io = fcloop_fcp_req,
|
2020-03-31 09:50:00 -07:00
|
|
|
.ls_abort = fcloop_h2t_ls_abort,
|
2016-12-02 00:28:44 -08:00
|
|
|
.fcp_abort = fcloop_fcp_abort,
|
2020-03-31 09:50:01 -07:00
|
|
|
.xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
|
2016-12-02 00:28:44 -08:00
|
|
|
.max_hw_queues = FCLOOP_HW_QUEUES,
|
|
|
|
.max_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.dma_boundary = FCLOOP_DMABOUND_4G,
|
|
|
|
/* sizes of additional private data for data structures */
|
2017-11-29 16:47:31 -08:00
|
|
|
.local_priv_sz = sizeof(struct fcloop_lport_priv),
|
2016-12-02 00:28:44 -08:00
|
|
|
.remote_priv_sz = sizeof(struct fcloop_rport),
|
2017-04-11 11:32:30 -07:00
|
|
|
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
|
2016-12-02 00:28:44 -08:00
|
|
|
};
|
|
|
|
|
2017-04-21 10:37:25 +02:00
|
|
|
static struct nvmet_fc_target_template tgttemplate = {
|
2016-12-02 00:28:44 -08:00
|
|
|
.targetport_delete = fcloop_targetport_delete,
|
2020-03-31 09:50:00 -07:00
|
|
|
.xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
|
2016-12-02 00:28:44 -08:00
|
|
|
.fcp_op = fcloop_fcp_op,
|
2017-04-11 11:32:31 -07:00
|
|
|
.fcp_abort = fcloop_tgt_fcp_abort,
|
2017-04-11 11:32:29 -07:00
|
|
|
.fcp_req_release = fcloop_fcp_req_release,
|
2019-05-14 14:58:04 -07:00
|
|
|
.discovery_event = fcloop_tgt_discovery_evt,
|
2020-03-31 09:50:01 -07:00
|
|
|
.ls_req = fcloop_t2h_ls_req,
|
|
|
|
.ls_abort = fcloop_t2h_ls_abort,
|
|
|
|
.host_release = fcloop_t2h_host_release,
|
2024-05-27 07:15:24 +02:00
|
|
|
.host_traddr = fcloop_t2h_host_traddr,
|
2016-12-02 00:28:44 -08:00
|
|
|
.max_hw_queues = FCLOOP_HW_QUEUES,
|
|
|
|
.max_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
|
|
|
|
.dma_boundary = FCLOOP_DMABOUND_4G,
|
|
|
|
/* optional features */
|
2017-11-29 16:47:32 -08:00
|
|
|
.target_features = 0,
|
2016-12-02 00:28:44 -08:00
|
|
|
/* sizes of additional private data for data structures */
|
|
|
|
.target_priv_sz = sizeof(struct fcloop_tport),
|
|
|
|
};
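/*
 * Both templates advertise the same loopback limits (FCLOOP_HW_QUEUES
 * hardware queues, FCLOOP_SGL_SEGS SGL segments, a 4G DMA boundary)
 * and declare the sizes of fcloop's per-object private state so the
 * FC host and target transports allocate it inline with their own
 * structures: lport_priv/rport/ini_fcpreq on the host side, tport on
 * the target side.
 */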
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_fc_port_info pinfo;
|
|
|
|
struct fcloop_ctrl_options *opts;
|
|
|
|
struct nvme_fc_local_port *localport;
|
|
|
|
struct fcloop_lport *lport;
|
2017-11-29 16:47:31 -08:00
|
|
|
struct fcloop_lport_priv *lport_priv;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret = -ENOMEM;
|
|
|
|
|
|
|
|
lport = kzalloc(sizeof(*lport), GFP_KERNEL);
|
|
|
|
if (!lport)
|
|
|
|
return -ENOMEM;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
|
|
|
|
if (!opts)
|
2017-11-29 16:47:31 -08:00
|
|
|
goto out_free_lport;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
ret = fcloop_parse_options(opts, buf);
|
|
|
|
if (ret)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
/* everything there ? */
|
|
|
|
if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out_free_opts;
|
|
|
|
}
|
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
memset(&pinfo, 0, sizeof(pinfo));
|
2016-12-02 00:28:44 -08:00
|
|
|
pinfo.node_name = opts->wwnn;
|
|
|
|
pinfo.port_name = opts->wwpn;
|
|
|
|
pinfo.port_role = opts->roles;
|
|
|
|
pinfo.port_id = opts->fcaddr;
|
|
|
|
|
|
|
|
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
|
|
|
|
if (!ret) {
|
|
|
|
/* success */
|
2017-11-29 16:47:31 -08:00
|
|
|
lport_priv = localport->private;
|
|
|
|
lport_priv->lport = lport;
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
lport->localport = localport;
|
|
|
|
INIT_LIST_HEAD(&lport->lport_list);
|
2025-04-08 17:29:05 +02:00
|
|
|
refcount_set(&lport->ref, 1);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
list_add_tail(&lport->lport_list, &fcloop_lports);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_free_opts:
|
|
|
|
kfree(opts);
|
2017-11-29 16:47:31 -08:00
|
|
|
out_free_lport:
|
|
|
|
/* free only if we're going to fail */
|
|
|
|
if (ret)
|
|
|
|
kfree(lport);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
return ret ? ret : count;
|
|
|
|
}
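/*
 * Usage sketch (illustrative values; the control node is the "ctl"
 * device created in fcloop_init(), typically visible as
 * /sys/class/fcloop/ctl):
 *
 *   echo "wwnn=0x10000090fdd88001,wwpn=0x10000090fdd88002" > \
 *       /sys/class/fcloop/ctl/add_local_port
 *
 * roles= and fcaddr= are parsed as well and are copied into the
 * registered nvme_fc_local_port.
 */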
|
|
|
|
|
|
|
|
static int
|
2025-05-07 14:23:06 +02:00
|
|
|
__localport_unreg(struct fcloop_lport *lport)
|
2016-12-02 00:28:44 -08:00
|
|
|
{
|
2025-05-07 14:23:06 +02:00
|
|
|
return nvme_fc_unregister_localport(lport->localport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
static struct fcloop_nport *
|
|
|
|
__fcloop_nport_lookup(u64 node_name, u64 port_name)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
|
|
|
|
list_for_each_entry(nport, &fcloop_nports, nport_list) {
|
|
|
|
if (nport->node_name != node_name ||
|
|
|
|
nport->port_name != port_name)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (fcloop_nport_get(nport))
|
|
|
|
return nport;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct fcloop_nport *
|
|
|
|
fcloop_nport_lookup(u64 node_name, u64 port_name)
|
|
|
|
{
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
nport = __fcloop_nport_lookup(node_name, port_name);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
return nport;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:59 +02:00
|
|
|
static struct fcloop_lport *
|
|
|
|
__fcloop_lport_lookup(u64 node_name, u64 port_name)
|
|
|
|
{
|
|
|
|
struct fcloop_lport *lport;
|
|
|
|
|
|
|
|
list_for_each_entry(lport, &fcloop_lports, lport_list) {
|
|
|
|
if (lport->localport->node_name != node_name ||
|
|
|
|
lport->localport->port_name != port_name)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (fcloop_lport_get(lport))
|
|
|
|
return lport;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2025-05-07 14:23:00 +02:00
|
|
|
static struct fcloop_lport *
|
|
|
|
fcloop_lport_lookup(u64 node_name, u64 port_name)
|
|
|
|
{
|
|
|
|
struct fcloop_lport *lport;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
lport = __fcloop_lport_lookup(node_name, port_name);
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
return lport;
|
|
|
|
}
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
2025-05-07 14:23:00 +02:00
|
|
|
struct fcloop_lport *lport;
|
2016-12-02 00:28:44 -08:00
|
|
|
u64 nodename, portname;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2025-05-07 14:23:00 +02:00
|
|
|
lport = fcloop_lport_lookup(nodename, portname);
|
2016-12-02 00:28:44 -08:00
|
|
|
if (!lport)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2025-05-07 14:23:06 +02:00
|
|
|
ret = __localport_unreg(lport);
|
2025-04-08 17:29:05 +02:00
|
|
|
fcloop_lport_put(lport);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct fcloop_nport *
|
|
|
|
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
|
|
|
|
{
|
2025-05-07 14:22:59 +02:00
|
|
|
struct fcloop_nport *newnport, *nport;
|
|
|
|
struct fcloop_lport *lport;
|
2016-12-02 00:28:44 -08:00
|
|
|
struct fcloop_ctrl_options *opts;
|
|
|
|
unsigned long flags;
|
|
|
|
u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
|
|
|
|
if (!opts)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
ret = fcloop_parse_options(opts, buf);
|
|
|
|
if (ret)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
/* everything there ? */
|
2025-05-07 14:22:59 +02:00
|
|
|
if ((opts->mask & opts_mask) != opts_mask)
|
2016-12-02 00:28:44 -08:00
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
|
|
|
|
if (!newnport)
|
|
|
|
goto out_free_opts;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&newnport->nport_list);
|
|
|
|
newnport->node_name = opts->wwnn;
|
|
|
|
newnport->port_name = opts->wwpn;
|
|
|
|
if (opts->mask & NVMF_OPT_ROLES)
|
|
|
|
newnport->port_role = opts->roles;
|
|
|
|
if (opts->mask & NVMF_OPT_FCADDR)
|
|
|
|
newnport->port_id = opts->fcaddr;
|
2025-04-08 17:29:04 +02:00
|
|
|
refcount_set(&newnport->ref, 1);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
2025-05-07 14:22:59 +02:00
|
|
|
lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn);
|
|
|
|
if (lport) {
|
|
|
|
/* invalid configuration */
|
|
|
|
fcloop_lport_put(lport);
|
|
|
|
goto out_free_newnport;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (remoteport) {
|
2025-05-07 14:22:59 +02:00
|
|
|
lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
|
|
|
|
if (!lport) {
|
|
|
|
/* invalid configuration */
|
2016-12-02 00:28:44 -08:00
|
|
|
goto out_free_newnport;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-05-07 14:22:59 +02:00
|
|
|
nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn);
|
|
|
|
if (nport) {
|
|
|
|
if ((remoteport && nport->rport) ||
|
|
|
|
(!remoteport && nport->tport)) {
|
|
|
|
/* invalid configuration */
|
|
|
|
goto out_put_nport;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* found existing nport, discard the new nport */
|
|
|
|
kfree(newnport);
|
|
|
|
} else {
|
|
|
|
list_add_tail(&newnport->nport_list, &fcloop_nports);
|
|
|
|
nport = newnport;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:59 +02:00
|
|
|
if (opts->mask & NVMF_OPT_ROLES)
|
|
|
|
nport->port_role = opts->roles;
|
|
|
|
if (opts->mask & NVMF_OPT_FCADDR)
|
|
|
|
nport->port_id = opts->fcaddr;
|
|
|
|
if (lport) {
|
|
|
|
if (!nport->lport)
|
|
|
|
nport->lport = lport;
|
|
|
|
else
|
|
|
|
fcloop_lport_put(lport);
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
kfree(opts);
|
2025-05-07 14:22:59 +02:00
|
|
|
return nport;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:59 +02:00
|
|
|
out_put_nport:
|
|
|
|
if (lport)
|
|
|
|
fcloop_lport_put(lport);
|
|
|
|
fcloop_nport_put(nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
out_free_newnport:
|
2025-05-07 14:22:59 +02:00
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
2016-12-02 00:28:44 -08:00
|
|
|
kfree(newnport);
|
|
|
|
out_free_opts:
|
|
|
|
kfree(opts);
|
2025-05-07 14:22:59 +02:00
|
|
|
return NULL;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
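/*
 * fcloop_alloc_nport() either returns a brand-new nport or, when an
 * nport with the same wwnn/wwpn already exists, takes a reference on
 * it and merges the new options into it (the freshly allocated
 * newnport is discarded in that case). A wwnn/wwpn that collides with
 * an existing local port is rejected, and a remote port additionally
 * requires lpwwnn/lpwwpn to name an existing local port, which the
 * nport then keeps pinned via nport->lport.
 */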
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_fc_remote_port *remoteport;
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_rport *rport;
|
|
|
|
struct nvme_fc_port_info pinfo;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
nport = fcloop_alloc_nport(buf, count, true);
|
|
|
|
if (!nport)
|
|
|
|
return -EIO;
|
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
memset(&pinfo, 0, sizeof(pinfo));
|
2016-12-02 00:28:44 -08:00
|
|
|
pinfo.node_name = nport->node_name;
|
|
|
|
pinfo.port_name = nport->port_name;
|
|
|
|
pinfo.port_role = nport->port_role;
|
|
|
|
pinfo.port_id = nport->port_id;
|
|
|
|
|
|
|
|
ret = nvme_fc_register_remoteport(nport->lport->localport,
|
|
|
|
&pinfo, &remoteport);
|
|
|
|
if (ret || !remoteport) {
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* success */
|
|
|
|
rport = remoteport->private;
|
|
|
|
rport->remoteport = remoteport;
|
|
|
|
rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
|
|
|
|
if (nport->tport) {
|
|
|
|
nport->tport->remoteport = remoteport;
|
|
|
|
nport->tport->lport = nport->lport;
|
|
|
|
}
|
|
|
|
rport->nport = nport;
|
|
|
|
rport->lport = nport->lport;
|
|
|
|
nport->rport = rport;
|
2025-05-07 14:23:04 +02:00
|
|
|
rport->flags = 0;
|
2020-03-18 14:41:12 -07:00
|
|
|
spin_lock_init(&rport->lock);
|
|
|
|
INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
|
|
|
|
INIT_LIST_HEAD(&rport->ls_list);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2016-12-09 14:59:47 +00:00
|
|
|
return count;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
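/*
 * Usage sketch (illustrative values): a remote port is tied to an
 * existing local port through the lpwwnn/lpwwpn pair, which selects
 * the fcloop local port passed to nvme_fc_register_remoteport():
 *
 *   echo "wwnn=0x20000090fdd88001,wwpn=0x20000090fdd88002,lpwwnn=0x10000090fdd88001,lpwwpn=0x10000090fdd88002" > /sys/class/fcloop/ctl/add_remote_port
 */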
|
|
|
|
|
|
|
|
|
|
|
|
static struct fcloop_rport *
|
|
|
|
__unlink_remote_port(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
struct fcloop_rport *rport = nport->rport;
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
lockdep_assert_held(&fcloop_lock);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
if (rport && nport->tport)
|
|
|
|
nport->tport->remoteport = NULL;
|
|
|
|
nport->rport = NULL;
|
|
|
|
|
|
|
|
return rport;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-09-19 14:01:50 -07:00
|
|
|
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
|
2016-12-02 00:28:44 -08:00
|
|
|
{
|
2017-09-19 14:01:50 -07:00
|
|
|
return nvme_fc_unregister_remoteport(rport->remoteport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
2025-05-07 14:22:57 +02:00
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_rport *rport;
|
2016-12-02 00:28:44 -08:00
|
|
|
u64 nodename, portname;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
nport = fcloop_nport_lookup(nodename, portname);
|
|
|
|
if (!nport)
|
|
|
|
return -ENOENT;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
rport = __unlink_remote_port(nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
if (!rport) {
|
|
|
|
ret = -ENOENT;
|
|
|
|
goto out_nport_put;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
ret = __remoteport_unreg(nport, rport);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
out_nport_put:
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct nvmet_fc_target_port *targetport;
|
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_tport *tport;
|
|
|
|
struct nvmet_fc_port_info tinfo;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
nport = fcloop_alloc_nport(buf, count, false);
|
|
|
|
if (!nport)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
tinfo.node_name = nport->node_name;
|
|
|
|
tinfo.port_name = nport->port_name;
|
|
|
|
tinfo.port_id = nport->port_id;
|
|
|
|
|
|
|
|
ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
|
|
|
|
&targetport);
|
|
|
|
if (ret) {
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* success */
|
|
|
|
tport = targetport->private;
|
|
|
|
tport->targetport = targetport;
|
|
|
|
tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
|
|
|
|
if (nport->rport)
|
|
|
|
nport->rport->targetport = targetport;
|
|
|
|
tport->nport = nport;
|
|
|
|
tport->lport = nport->lport;
|
|
|
|
nport->tport = tport;
|
2025-05-07 14:23:04 +02:00
|
|
|
tport->flags = 0;
|
2020-03-31 09:50:01 -07:00
|
|
|
spin_lock_init(&tport->lock);
|
|
|
|
INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
|
|
|
|
INIT_LIST_HEAD(&tport->ls_list);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2016-12-09 14:59:47 +00:00
|
|
|
return count;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static struct fcloop_tport *
|
|
|
|
__unlink_target_port(struct fcloop_nport *nport)
|
|
|
|
{
|
|
|
|
struct fcloop_tport *tport = nport->tport;
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
lockdep_assert_held(&fcloop_lock);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
if (tport && nport->rport)
|
|
|
|
nport->rport->targetport = NULL;
|
|
|
|
nport->tport = NULL;
|
|
|
|
|
|
|
|
return tport;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-09-19 14:01:50 -07:00
|
|
|
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
|
2016-12-02 00:28:44 -08:00
|
|
|
{
|
2017-09-19 14:01:50 -07:00
|
|
|
return nvmet_fc_unregister_targetport(tport->targetport);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
2025-05-07 14:22:57 +02:00
|
|
|
struct fcloop_nport *nport;
|
|
|
|
struct fcloop_tport *tport;
|
2016-12-02 00:28:44 -08:00
|
|
|
u64 nodename, portname;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
nport = fcloop_nport_lookup(nodename, portname);
|
|
|
|
if (!nport)
|
|
|
|
return -ENOENT;
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
tport = __unlink_target_port(nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
if (!tport) {
|
|
|
|
ret = -ENOENT;
|
|
|
|
goto out_nport_put;
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2017-09-19 14:01:50 -07:00
|
|
|
ret = __targetport_unreg(nport, tport);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
out_nport_put:
|
|
|
|
fcloop_nport_put(nport);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
return ret ? ret : count;
|
|
|
|
}
|
|
|
|
|
2020-10-16 14:28:38 -07:00
|
|
|
static ssize_t
|
|
|
|
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
2020-12-07 12:29:40 -08:00
|
|
|
unsigned int opcode;
|
|
|
|
int starting, amount;
|
2020-10-16 14:28:38 -07:00
|
|
|
|
|
|
|
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
|
|
|
|
return -EBADRQC;
|
|
|
|
|
|
|
|
drop_current_cnt = 0;
|
|
|
|
drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
|
|
|
|
drop_opcode = (opcode & DROP_OPCODE_MASK);
|
|
|
|
drop_instance = starting;
|
|
|
|
/* The drop-check routine uses instance + count to know when
|
|
|
|
* to end. Thus, if dropping 1 instance, count should be 0,
|
|
|
|
* so subtract 1 from the count.
|
|
|
|
*/
|
|
|
|
drop_amount = amount - 1;
|
|
|
|
|
|
|
|
pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
|
|
|
|
"instances\n",
|
|
|
|
__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
|
|
|
|
drop_opcode, drop_amount);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
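/*
 * Usage sketch (illustrative): drop one instance of opcode 0x2,
 * starting with the first match seen on the loopback path:
 *
 *   echo "2:0:1" > /sys/class/fcloop/ctl/set_cmd_drop
 *
 * The input is parsed as "<opcode in hex>:<starting instance>:<count>";
 * bits set outside DROP_OPCODE_MASK flag the opcode as a fabrics
 * command rather than an nvme command.
 */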
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
|
|
|
|
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
|
|
|
|
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
|
|
|
|
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
|
|
|
|
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
|
|
|
|
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
|
2020-10-16 14:28:38 -07:00
|
|
|
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
static struct attribute *fcloop_dev_attrs[] = {
|
|
|
|
&dev_attr_add_local_port.attr,
|
|
|
|
&dev_attr_del_local_port.attr,
|
|
|
|
&dev_attr_add_remote_port.attr,
|
|
|
|
&dev_attr_del_remote_port.attr,
|
|
|
|
&dev_attr_add_target_port.attr,
|
|
|
|
&dev_attr_del_target_port.attr,
|
2020-10-16 14:28:38 -07:00
|
|
|
&dev_attr_set_cmd_drop.attr,
|
2016-12-02 00:28:44 -08:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2021-01-09 00:41:47 +01:00
|
|
|
static const struct attribute_group fcloop_dev_attrs_group = {
|
2016-12-02 00:28:44 -08:00
|
|
|
.attrs = fcloop_dev_attrs,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *fcloop_dev_attr_groups[] = {
|
|
|
|
&fcloop_dev_attrs_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2024-03-05 10:15:58 -03:00
|
|
|
static const struct class fcloop_class = {
|
|
|
|
.name = "fcloop",
|
|
|
|
};
|
2016-12-02 00:28:44 -08:00
|
|
|
static struct device *fcloop_device;
|
|
|
|
|
|
|
|
static int __init fcloop_init(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2025-05-07 14:23:05 +02:00
|
|
|
lsreq_cache = kmem_cache_create("lsreq_cache",
|
|
|
|
sizeof(struct fcloop_lsreq), 0,
|
|
|
|
0, NULL);
|
|
|
|
if (!lsreq_cache)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2024-03-05 10:15:58 -03:00
|
|
|
ret = class_register(&fcloop_class);
|
|
|
|
if (ret) {
|
2016-12-02 00:28:44 -08:00
|
|
|
pr_err("couldn't register class fcloop\n");
|
2025-05-07 14:23:05 +02:00
|
|
|
goto out_destroy_cache;
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
fcloop_device = device_create_with_groups(
|
2024-03-05 10:15:58 -03:00
|
|
|
&fcloop_class, NULL, MKDEV(0, 0), NULL,
|
2016-12-02 00:28:44 -08:00
|
|
|
fcloop_dev_attr_groups, "ctl");
|
|
|
|
if (IS_ERR(fcloop_device)) {
|
|
|
|
pr_err("couldn't create ctl device!\n");
|
|
|
|
ret = PTR_ERR(fcloop_device);
|
|
|
|
goto out_destroy_class;
|
|
|
|
}
|
|
|
|
|
|
|
|
get_device(fcloop_device);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_destroy_class:
|
2024-03-05 10:15:58 -03:00
|
|
|
class_unregister(&fcloop_class);
|
2025-05-07 14:23:05 +02:00
|
|
|
out_destroy_cache:
|
|
|
|
kmem_cache_destroy(lsreq_cache);
|
2016-12-02 00:28:44 -08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
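/*
 * Module teardown: walk the global nport list under fcloop_lock,
 * pinning each nport, drop the lock to unregister its target and
 * remote ports, and put the nport again; then do the same dance for
 * the local ports. Finally release the ctl device, unregister the
 * class and destroy the LS request cache.
 */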
static void __exit fcloop_exit(void)
|
|
|
|
{
|
2025-05-07 14:22:57 +02:00
|
|
|
struct fcloop_lport *lport;
|
|
|
|
struct fcloop_nport *nport;
|
2016-12-02 00:28:44 -08:00
|
|
|
struct fcloop_tport *tport;
|
|
|
|
struct fcloop_rport *rport;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
nport = list_first_entry_or_null(&fcloop_nports,
|
|
|
|
typeof(*nport), nport_list);
|
2025-05-07 14:22:57 +02:00
|
|
|
if (!nport || !fcloop_nport_get(nport))
|
2016-12-02 00:28:44 -08:00
|
|
|
break;
|
|
|
|
|
|
|
|
tport = __unlink_target_port(nport);
|
|
|
|
rport = __unlink_remote_port(nport);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
if (tport) {
|
|
|
|
ret = __targetport_unreg(nport, tport);
|
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting target port\n",
|
|
|
|
__func__);
|
|
|
|
}
|
2016-12-02 00:28:44 -08:00
|
|
|
|
2025-05-07 14:22:57 +02:00
|
|
|
if (rport) {
|
|
|
|
ret = __remoteport_unreg(nport, rport);
|
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting remote port\n",
|
|
|
|
__func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
fcloop_nport_put(nport);
|
2016-12-02 00:28:44 -08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
lport = list_first_entry_or_null(&fcloop_lports,
|
|
|
|
typeof(*lport), lport_list);
|
2025-04-08 17:29:05 +02:00
|
|
|
if (!lport || !fcloop_lport_get(lport))
|
2016-12-02 00:28:44 -08:00
|
|
|
break;
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
2025-05-07 14:23:06 +02:00
|
|
|
ret = __localport_unreg(lport);
|
2016-12-02 00:28:44 -08:00
|
|
|
if (ret)
|
|
|
|
pr_warn("%s: Failed deleting local port\n", __func__);
|
|
|
|
|
2025-04-08 17:29:05 +02:00
|
|
|
fcloop_lport_put(lport);
|
|
|
|
|
2016-12-02 00:28:44 -08:00
|
|
|
spin_lock_irqsave(&fcloop_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&fcloop_lock, flags);
|
|
|
|
|
|
|
|
put_device(fcloop_device);
|
|
|
|
|
2024-03-05 10:15:58 -03:00
|
|
|
device_destroy(&fcloop_class, MKDEV(0, 0));
|
|
|
|
class_unregister(&fcloop_class);
|
2025-05-07 14:23:05 +02:00
|
|
|
kmem_cache_destroy(lsreq_cache);
|
2016-12-02 00:28:44 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
module_init(fcloop_init);
|
|
|
|
module_exit(fcloop_exit);
|
|
|
|
|
2024-01-23 14:13:41 -08:00
|
|
|
MODULE_DESCRIPTION("NVMe target FC loop transport driver");
|
2016-12-02 00:28:44 -08:00
|
|
|
MODULE_LICENSE("GPL v2");
|