// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
|
|
|
|
int has_srq)
|
|
|
|
{
|
|
|
|
if (cap->max_send_wr > rxe->attr.max_qp_wr) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
cap->max_send_wr, rxe->attr.max_qp_wr);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
2018-06-18 08:05:26 -07:00
|
|
|
if (cap->max_send_sge > rxe->attr.max_send_sge) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
cap->max_send_sge, rxe->attr.max_send_sge);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!has_srq) {
|
|
|
|
if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
cap->max_recv_wr, rxe->attr.max_qp_wr);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
2018-06-18 08:05:26 -07:00
|
|
|
if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
cap->max_recv_sge, rxe->attr.max_recv_sge);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cap->max_inline_data > rxe->max_inline_data) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
cap->max_inline_data, rxe->max_inline_data);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err1:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
|
|
|
|
{
|
|
|
|
struct ib_qp_cap *cap = &init->cap;
|
|
|
|
struct rxe_port *port;
|
|
|
|
int port_num = init->port_num;
|
|
|
|
|
2021-02-05 17:05:26 -06:00
|
|
|
switch (init->qp_type) {
|
2020-12-16 15:17:55 +08:00
|
|
|
case IB_QPT_GSI:
|
|
|
|
case IB_QPT_RC:
|
|
|
|
case IB_QPT_UC:
|
|
|
|
case IB_QPT_UD:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2016-06-16 16:45:23 +03:00
|
|
|
if (!init->recv_cq || !init->send_cq) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "missing cq\n");
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
|
|
|
|
goto err1;
|
|
|
|
|
2022-04-07 13:54:17 -05:00
|
|
|
if (init->qp_type == IB_QPT_GSI) {
|
2018-12-06 16:02:34 +02:00
|
|
|
if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
|
|
|
port = &rxe->port;
|
|
|
|
|
|
|
|
if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
|
2023-03-03 16:16:22 -06:00
|
|
|
rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err1:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
|
|
|
|
{
|
|
|
|
qp->resp.res_head = 0;
|
|
|
|
qp->resp.res_tail = 0;
|
|
|
|
qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!qp->resp.resources)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_rd_atomic_resources(struct rxe_qp *qp)
|
|
|
|
{
|
|
|
|
if (qp->resp.resources) {
|
|
|
|
int i;
|
|
|
|
|
2016-09-28 20:26:44 +00:00
|
|
|
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
|
2016-06-16 16:45:23 +03:00
|
|
|
struct resp_res *res = &qp->resp.resources[i];
|
|
|
|
|
2022-07-08 03:55:50 +00:00
|
|
|
free_rd_atomic_resource(res);
|
2016-06-16 16:45:23 +03:00
|
|
|
}
|
|
|
|
kfree(qp->resp.resources);
|
|
|
|
qp->resp.resources = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-08 03:55:50 +00:00
|
|
|
/* Mark a single read/atomic resource slot as free.
 * Clearing the type is sufficient; the slot memory is reused in place.
 */
void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct resp_res *res;
|
|
|
|
|
|
|
|
if (qp->resp.resources) {
|
2016-09-28 20:26:44 +00:00
|
|
|
for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
|
2016-06-16 16:45:23 +03:00
|
|
|
res = &qp->resp.resources[i];
|
2022-07-08 03:55:50 +00:00
|
|
|
free_rd_atomic_resource(res);
|
2016-06-16 16:45:23 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
|
|
|
|
struct ib_qp_init_attr *init)
|
|
|
|
{
|
|
|
|
struct rxe_port *port;
|
|
|
|
u32 qpn;
|
|
|
|
|
|
|
|
qp->sq_sig_type = init->sq_sig_type;
|
|
|
|
qp->attr.path_mtu = 1;
|
|
|
|
qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
|
|
|
|
|
2021-11-03 00:02:31 -05:00
|
|
|
qpn = qp->elem.index;
|
2016-06-16 16:45:23 +03:00
|
|
|
port = &rxe->port;
|
|
|
|
|
|
|
|
switch (init->qp_type) {
|
|
|
|
case IB_QPT_GSI:
|
|
|
|
qp->ibqp.qp_num = 1;
|
|
|
|
port->qp_gsi_index = qpn;
|
|
|
|
qp->attr.port_num = init->port_num;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
qp->ibqp.qp_num = qpn;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_lock_init(&qp->state_lock);
|
|
|
|
|
2022-07-31 02:36:21 -04:00
|
|
|
spin_lock_init(&qp->sq.sq_lock);
|
|
|
|
spin_lock_init(&qp->rq.producer_lock);
|
|
|
|
spin_lock_init(&qp->rq.consumer_lock);
|
|
|
|
|
2023-06-02 11:54:08 +08:00
|
|
|
skb_queue_head_init(&qp->req_pkts);
|
|
|
|
skb_queue_head_init(&qp->resp_pkts);
|
|
|
|
|
2016-06-16 16:45:23 +03:00
|
|
|
atomic_set(&qp->ssn, 0);
|
|
|
|
atomic_set(&qp->skb_out, 0);
|
|
|
|
}
|
|
|
|
|
2023-06-20 08:55:19 -05:00
|
|
|
/* Allocate and set up the send queue for a new QP.
 * @qp:    the queue pair being created
 * @init:  requested capabilities; updated in place with the granted values
 * @udata: user-space context, NULL for kernel QPs
 * @uresp: user response buffer for mmap info, NULL for kernel QPs
 *
 * Sizes each WQE to hold either the requested SGEs or the requested
 * inline data, whichever is larger, so inline sends reuse the SGE area.
 * Returns 0 on success or a negative errno.
 */
static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int wqe_size;
	int err;

	qp->sq.max_wr = init->cap.max_send_wr;
	/* payload area = max(SGE list, inline data) */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	/* recompute granted SGE/inline limits from the rounded size */
	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	/* rxe_queue_init may round max_wr up to a power of two */
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->sq.queue) {
		rxe_err_qp(qp, "Unable to allocate send queue\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* prepare info for caller to mmap send queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		goto err_free;
	}

	/* return actual capabilities to caller which may be larger
	 * than requested
	 */
	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	return 0;

err_free:
	/* queue buf is vmalloc'ed, the descriptor is kmalloc'ed */
	vfree(qp->sq.queue->buf);
	kfree(qp->sq.queue);
	qp->sq.queue = NULL;
err_out:
	return err;
}
/* Initialize the requester side of a new QP: UDP socket, source port,
 * send queue, send task and (for RC) the retry/RNR-NAK timers.
 *
 * Returns 0 on success or a negative errno.
 */
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;

	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->req_pkts);

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	/* stash the QP index so the sock can be mapped back to its QP */
	qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);

	err = rxe_init_sq(qp, init, udata, uresp);
	if (err)
		return err;

	/* start the WQE index at the queue's current producer position */
	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	/* -1 marks "no previous opcode" */
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	rxe_init_task(&qp->send_task, qp, rxe_sender);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	/* only RC QPs use retransmit and RNR-NAK timers */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}
/* Allocate and set up the receive queue for a new QP (no-SRQ case).
 * @qp:    the queue pair being created
 * @init:  requested capabilities; updated in place with the granted values
 * @udata: user-space context, NULL for kernel QPs
 * @uresp: user response buffer for mmap info, NULL for kernel QPs
 *
 * Mirrors rxe_init_sq() for the receive side. Returns 0 on success or
 * a negative errno.
 */
static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int wqe_size;
	int err;

	qp->rq.max_wr = init->cap.max_recv_wr;
	qp->rq.max_sge = init->cap.max_recv_sge;
	/* each recv WQE carries its header plus the SGE list */
	wqe_size = sizeof(struct rxe_recv_wqe) +
				qp->rq.max_sge*sizeof(struct ib_sge);

	/* rxe_queue_init may round max_wr up to a power of two */
	qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->rq.queue) {
		rxe_err_qp(qp, "Unable to allocate recv queue\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* prepare info for caller to mmap recv queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
			   qp->rq.queue->buf, qp->rq.queue->buf_size,
			   &qp->rq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		goto err_free;
	}

	/* return actual capabilities to caller which may be larger
	 * than requested
	 */
	init->cap.max_recv_wr = qp->rq.max_wr;

	return 0;

err_free:
	/* queue buf is vmalloc'ed, the descriptor is kmalloc'ed */
	vfree(qp->rq.queue->buf);
	kfree(qp->rq.queue);
	qp->rq.queue = NULL;
err_out:
	return err;
}
/* Initialize the responder side of a new QP: response packet queue,
 * receive queue (unless an SRQ is used) and the receive task.
 *
 * Returns 0 on success or a negative errno.
 */
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;

	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->resp_pkts);

	/* an SRQ-attached QP receives through the SRQ, not its own rq */
	if (!qp->srq) {
		err = rxe_init_rq(qp, init, udata, uresp);
		if (err)
			return err;
	}

	rxe_init_task(&qp->recv_task, qp, rxe_receiver);

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;

	return 0;
}
/* called by the create qp verb */
/* Fully construct a QP from its init attributes.
 *
 * Takes references on the PD, CQs and (optional) SRQ up front so the
 * error path can unconditionally drop them; initializes the misc,
 * requester and responder state; and finally marks the QP valid in the
 * RESET state under the state lock.
 *
 * Returns 0 on success or a negative errno with all references and
 * partially-built state rolled back.
 */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	unsigned long flags;

	/* hold references for the lifetime of the QP */
	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	/* count this QP as a user of each CQ */
	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return 0;

err2:
	/* undo the send queue built by rxe_qp_init_req */
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}
/* called by the query qp verb */
|
|
|
|
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
|
|
|
|
{
|
|
|
|
init->event_handler = qp->ibqp.event_handler;
|
|
|
|
init->qp_context = qp->ibqp.qp_context;
|
|
|
|
init->send_cq = qp->ibqp.send_cq;
|
|
|
|
init->recv_cq = qp->ibqp.recv_cq;
|
|
|
|
init->srq = qp->ibqp.srq;
|
|
|
|
|
|
|
|
init->cap.max_send_wr = qp->sq.max_wr;
|
|
|
|
init->cap.max_send_sge = qp->sq.max_sge;
|
|
|
|
init->cap.max_inline_data = qp->sq.max_inline;
|
|
|
|
|
|
|
|
if (!qp->srq) {
|
|
|
|
init->cap.max_recv_wr = qp->rq.max_wr;
|
|
|
|
init->cap.max_recv_sge = qp->rq.max_sge;
|
|
|
|
}
|
|
|
|
|
|
|
|
init->sq_sig_type = qp->sq_sig_type;
|
|
|
|
|
|
|
|
init->qp_type = qp->ibqp.qp_type;
|
|
|
|
init->port_num = 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
|
|
|
|
struct ib_qp_attr *attr, int mask)
|
|
|
|
{
|
|
|
|
if (mask & IB_QP_PORT) {
|
2018-12-06 16:02:34 +02:00
|
|
|
if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
|
|
|
|
goto err1;
|
|
|
|
|
2023-05-30 17:13:33 -05:00
|
|
|
if (mask & IB_QP_ACCESS_FLAGS) {
|
|
|
|
if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
|
|
|
|
goto err1;
|
|
|
|
if (attr->qp_access_flags & ~RXE_ACCESS_SUPPORTED_QP)
|
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
|
2022-11-03 12:10:10 -05:00
|
|
|
if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
|
|
|
|
if (mask & IB_QP_ALT_PATH) {
|
2022-11-03 12:10:10 -05:00
|
|
|
if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
2018-12-06 16:02:34 +02:00
|
|
|
if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
if (attr->alt_timeout > 31) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
attr->alt_timeout);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mask & IB_QP_PATH_MTU) {
|
|
|
|
struct rxe_port *port = &rxe->port;
|
|
|
|
|
|
|
|
enum ib_mtu max_mtu = port->attr.max_mtu;
|
|
|
|
enum ib_mtu mtu = attr->path_mtu;
|
|
|
|
|
|
|
|
if (mtu > max_mtu) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
|
2016-06-16 16:45:23 +03:00
|
|
|
ib_mtu_enum_to_int(mtu),
|
|
|
|
ib_mtu_enum_to_int(max_mtu));
|
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
|
|
|
|
if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
|
2022-09-07 02:48:21 +00:00
|
|
|
attr->max_rd_atomic,
|
|
|
|
rxe->attr.max_qp_rd_atom);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mask & IB_QP_TIMEOUT) {
|
|
|
|
if (attr->timeout > 31) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
|
|
|
|
attr->timeout);
|
2016-06-16 16:45:23 +03:00
|
|
|
goto err1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err1:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* move the qp to the reset state */
/* Ordering matters here: the tasks must be stopped before the queues
 * are drained and reset, and only re-enabled once all per-QP state has
 * been returned to its initial values.
 */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->recv_task);
	rxe_disable_task(&qp->send_task);

	/* drain work and packet queues */
	rxe_sender(qp);
	rxe_receiver(qp);

	/* reset the work queues themselves (rq may be absent with an SRQ) */
	if (qp->rq.queue)
		rxe_queue_reset(qp->rq.queue);
	if (qp->sq.queue)
		rxe_queue_reset(qp->sq.queue);

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	/* drop any MR the responder was holding mid-operation */
	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->recv_task);
	rxe_enable_task(&qp->send_task);
}
|
|
|
|
/* move the qp to the error state */
/* The state change and the task scheduling happen together under the
 * state lock so the tasks observe the ERR state when they run and can
 * flush outstanding work.
 */
void rxe_qp_error(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->recv_task);
	rxe_sched_task(&qp->send_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}
|
|
|
|
/* Begin the Send Queue Drain transition: mark the sq as draining and
 * kick the send task so it completes the outstanding send work.
 * @attr and @mask are currently unused but kept for symmetry with the
 * other state-transition helpers.
 */
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
		       int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.sq_draining = 1;
	rxe_sched_task(&qp->send_task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}
/* caller should hold qp->state_lock */
|
|
|
|
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
|
|
|
|
int mask)
|
|
|
|
{
|
|
|
|
enum ib_qp_state cur_state;
|
|
|
|
enum ib_qp_state new_state;
|
|
|
|
|
|
|
|
cur_state = (mask & IB_QP_CUR_STATE) ?
|
|
|
|
attr->cur_qp_state : qp->attr.qp_state;
|
|
|
|
new_state = (mask & IB_QP_STATE) ?
|
|
|
|
attr->qp_state : cur_state;
|
|
|
|
|
|
|
|
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
|
|
|
|
if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2016-06-16 16:45:23 +03:00
|
|
|
}
|
|
|
|
|
2023-04-04 23:26:11 -05:00
|
|
|
/* human-readable names for the ib_qp_state values, used in debug output */
static const char *const qps2str[] = {
	[IB_QPS_RESET]	= "RESET",
	[IB_QPS_INIT]	= "INIT",
	[IB_QPS_RTR]	= "RTR",
	[IB_QPS_RTS]	= "RTS",
	[IB_QPS_SQD]	= "SQD",
	[IB_QPS_SQE]	= "SQE",
	[IB_QPS_ERR]	= "ERR",
};
2016-06-16 16:45:23 +03:00
|
|
|
/* called by the modify qp verb */
/* Apply the attributes selected by @mask to the QP.
 *
 * The state transition (if any) is validated and recorded under the
 * state lock first, and its side effects (reset/drain/error handling)
 * run before any other attribute is applied. Each remaining mask bit is
 * then handled independently.
 *
 * Returns 0 on success or a negative errno (invalid transition, or
 * allocation failure while resizing the rd/atomic resource ring).
 */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_STATE) {
		unsigned long flags;

		spin_lock_irqsave(&qp->state_lock, flags);
		err = __qp_chk_state(qp, attr, mask);
		if (!err) {
			qp->attr.qp_state = attr->qp_state;
			rxe_dbg_qp(qp, "state -> %s\n",
					qps2str[attr->qp_state]);
		}
		spin_unlock_irqrestore(&qp->state_lock, flags);

		if (err)
			return err;

		/* run the side effects of the new state outside the lock */
		switch (attr->qp_state) {
		case IB_QPS_RESET:
			rxe_qp_reset(qp);
			break;
		case IB_QPS_SQD:
			rxe_qp_sqd(qp, attr, mask);
			break;
		case IB_QPS_ERR:
			rxe_qp_error(qp);
			break;
		default:
			break;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		/* round up to a power of two; 0 stays 0 */
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		/* round up to a power of two; 0 stays 0 */
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		/* resize the responder's resource ring */
		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			/* never let a nonzero timeout round down to "disabled" */
			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
			   attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	return 0;
}
/* called by the query qp verb */
/* Report the QP's current attributes. Always returns 0. */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	unsigned long flags;

	/* start from the cached attributes, then overwrite live fields */
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	/* recv limits only exist when the QP owns its recv queue */
	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	/* Applications that get this state typically spin on it.
	 * Yield the processor
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	attr->cur_qp_state = qp_state(qp);
	if (qp->attr.sq_draining) {
		/* drop the lock before yielding */
		spin_unlock_irqrestore(&qp->state_lock, flags);
		cond_resched();
	} else {
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}

	return 0;
}
|
2022-01-27 15:37:33 -06:00
|
|
|
int rxe_qp_chk_destroy(struct rxe_qp *qp)
|
|
|
|
{
|
|
|
|
/* See IBA o10-2.2.3
|
|
|
|
* An attempt to destroy a QP while attached to a mcast group
|
|
|
|
* will fail immediately.
|
|
|
|
*/
|
|
|
|
if (atomic_read(&qp->mcg_num)) {
|
2022-11-03 12:10:05 -05:00
|
|
|
rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
|
2022-01-27 15:37:33 -06:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-04-20 20:40:37 -05:00
|
|
|
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
	unsigned long flags;

	/* Mark the qp invalid under the state lock so that concurrent
	 * users observing qp->valid stop before teardown proceeds.
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	qp->valid = 0;
	spin_unlock_irqrestore(&qp->state_lock, flags);
	qp->qp_timeout_jiffies = 0;

	/* In the function timer_setup, .function is initialized. If .function
	 * is NULL, it indicates the function timer_setup is not called, the
	 * timer is not initialized. Or else, the timer is initialized.
	 */
	if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function &&
	    qp->rnr_nak_timer.function) {
		timer_delete_sync(&qp->retrans_timer);
		timer_delete_sync(&qp->rnr_nak_timer);
	}

	/* Tasks are torn down only if they were set up (.func non-NULL),
	 * mirroring the timer check above for partially initialized QPs.
	 */
	if (qp->recv_task.func)
		rxe_cleanup_task(&qp->recv_task);

	if (qp->send_task.func)
		rxe_cleanup_task(&qp->send_task);

	/* flush out any receive wr's or pending requests */
	rxe_sender(qp);
	rxe_receiver(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	/* drop the references this qp holds on its SRQ, CQs, PD and
	 * responder MR; also undo the per-CQ work-queue accounting.
	 */
	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		/* NOTE(review): dst is reset only for RC sockets —
		 * presumably RC caches a route on the socket; confirm.
		 */
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}
|
2018-01-12 15:11:59 -08:00
|
|
|
|
|
|
|
/* called when the last reference to the qp is dropped */
|
2021-11-03 00:02:31 -05:00
|
|
|
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
|
2018-01-12 15:11:59 -08:00
|
|
|
{
|
2021-11-03 00:02:31 -05:00
|
|
|
struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
|
2018-01-12 15:11:59 -08:00
|
|
|
|
|
|
|
execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
|
|
|
|
}
|