RDMA/bnxt_re: Refurbish CQ to NQ hash calculation
There are use cases where a CQ is created and destroyed before being re-created. This pattern disturbs the round-robin distribution, and all active CQs end up mapped to only two NQs, alternating between them. Fix the CQ to NQ hash calculation by implementing a quick load-sorting mechanism under a mutex: even if a CQ was allocated and destroyed before being used, the NQ selection algorithm still picks the least loaded NQ, balancing the load across the NQs.

Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Link: https://patch.msgid.link/1731577748-1804-4-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 30b871338c
commit cb97b377a1
5 changed files with 32 additions and 11 deletions
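For orientation before the diff: the patch replaces counter-based round-robin NQ selection with a least-loaded scan guarded by a mutex. A minimal userspace sketch of that idea follows (NUM_NQS, struct nq, pick_least_loaded() and put_nq() are hypothetical stand-ins for the driver's per-NQ state, not driver code):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's per-NQ bookkeeping. */
#define NUM_NQS 4

struct nq {
	unsigned int load;	/* number of CQs currently mapped to this NQ */
};

static struct nq nqs[NUM_NQS];
static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;

/* Scan every NQ under the lock, pick the one with the fewest CQs,
 * and account for the new CQ before releasing the lock. */
static struct nq *pick_least_loaded(void)
{
	int min = 0, i;

	pthread_mutex_lock(&load_lock);
	for (i = 0; i < NUM_NQS; i++) {
		if (nqs[min].load > nqs[i].load)
			min = i;
	}
	nqs[min].load++;
	pthread_mutex_unlock(&load_lock);
	return &nqs[min];
}

/* Drop the accounting when a CQ is destroyed, mirroring bnxt_re_put_nq(). */
static void put_nq(struct nq *nq)
{
	pthread_mutex_lock(&load_lock);
	nq->load--;
	pthread_mutex_unlock(&load_lock);
}

int main(void)
{
	/* create/destroy/create: a plain round-robin counter would advance
	 * to the next NQ on the second create even though the first NQ is
	 * idle again; least-loaded selection reuses it. */
	struct nq *a = pick_least_loaded();
	put_nq(a);
	struct nq *b = pick_least_loaded();
	printf("reused same NQ: %s\n", a == b ? "yes" : "no");
	return 0;
}

The create/destroy/create sequence in main() is exactly the pattern the commit message describes: a monotonic counter advances on every create regardless of which CQs are still alive, so repeated create/destroy cycles can concentrate all live CQs on a couple of NQs, whereas selecting by current load keeps the mapping balanced no matter the creation order.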
drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -159,6 +159,8 @@ struct bnxt_re_pacing {
 struct bnxt_re_nq_record {
 	struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
 	int num_msix;
+	/* serialize NQ access */
+	struct mutex load_lock;
 };
 
 #define MAX_CQ_HASH_BITS (16)
drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3029,6 +3029,28 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
 	return rc;
 }
 
+static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
+{
+	int min, indx;
+
+	mutex_lock(&rdev->nqr->load_lock);
+	for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) {
+		if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)
+			min = indx;
+	}
+	rdev->nqr->nq[min].load++;
+	mutex_unlock(&rdev->nqr->load_lock);
+
+	return &rdev->nqr->nq[min];
+}
+
+static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
+{
+	mutex_lock(&rdev->nqr->load_lock);
+	nq->load--;
+	mutex_unlock(&rdev->nqr->load_lock);
+}
+
 /* Completion Queues */
 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
@@ -3047,6 +3069,8 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 		hash_del(&cq->hash_entry);
 	}
 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+
+	bnxt_re_put_nq(rdev, nq);
 	ib_umem_release(cq->umem);
 
 	atomic_dec(&rdev->stats.res.cq_count);
@@ -3065,8 +3089,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 	struct bnxt_qplib_chip_ctx *cctx;
-	struct bnxt_qplib_nq *nq = NULL;
-	unsigned int nq_alloc_cnt;
 	int cqe = attr->cqe;
 	int rc, entries;
 	u32 active_cqs;
@@ -3117,16 +3139,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
 	}
-	/*
-	 * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
-	 * used for getting the NQ index.
-	 */
-	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
-	nq = &rdev->nqr->nq[nq_alloc_cnt % (rdev->nqr->num_msix - 1)];
 	cq->qplib_cq.max_wqe = entries;
-	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
-	cq->qplib_cq.nq = nq;
 	cq->qplib_cq.coalescing = &rdev->cq_coalescing;
+	cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
+	cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
 
 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
 	if (rc) {
@@ -3136,7 +3152,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
 	cq->ib_cq.cqe = entries;
 	cq->cq_period = cq->qplib_cq.period;
-	nq->budget++;
 
 	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
 	if (active_cqs > rdev->stats.res.cq_watermark)
drivers/infiniband/hw/bnxt_re/main.c
@@ -1566,6 +1566,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
 
 	bnxt_qplib_init_res(&rdev->qplib_res);
 
+	mutex_init(&rdev->nqr->load_lock);
+
 	for (i = 1; i < rdev->nqr->num_msix ; i++) {
 		db_offt = rdev->en_dev->msix_entries[i].db_offset;
 		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1],
drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -551,6 +551,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 	nq->pdev = pdev;
 	nq->cqn_handler = cqn_handler;
 	nq->srqn_handler = srqn_handler;
+	nq->load = 0;
 
 	/* Have a task to schedule CQ notifiers in post send case */
 	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -519,6 +519,7 @@ struct bnxt_qplib_nq {
 	struct tasklet_struct	nq_tasklet;
 	bool			requested;
 	int			budget;
+	u32			load;
 
 	cqn_handler_t		cqn_handler;
 	srqn_handler_t		srqn_handler;