RDMA/mlx5: Attach ndescs to mlx5_ib_mkey
Generalize the use of ndescs by adding it to mlx5_ib_mkey.

Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent 4123bfb0b2
commit ae0579acde

6 changed files with 26 additions and 52 deletions
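For orientation before the per-file hunks: the commit hoists the descriptor count out of each indirect-mkey wrapper and onto the generic mkey. A minimal sketch of the before/after shapes follows; the _before/_after suffixes are illustrative only, and the real definitions (original names, full field lists) appear in the mlx5_ib.h hunk below.

/* Before: every wrapper around an indirect mkey carried its own count. */
struct mlx5_ib_mw_before {
	struct ib_mw ibmw;
	struct mlx5_ib_mkey mmkey;
	int ndescs;			/* per-wrapper copy */
};

struct mlx5_ib_devx_mr_before {
	struct mlx5_ib_mkey mmkey;
	int ndescs;			/* duplicated again */
};

/* After: the count lives once, on the generic mkey itself. */
struct mlx5_ib_mkey_after {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;		/* shared by MRs, MWs and DEVX mkeys */
	struct wait_queue_head wait;
	refcount_t usecount;
};

This also lets struct mlx5_ib_devx_mr disappear entirely: a devx_obj can hold a bare struct mlx5_ib_mkey in its union, as the devx.h hunk shows.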
drivers/infiniband/hw/mlx5/devx.c

@@ -1292,18 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 					    struct mlx5_ib_dev *dev,
 					    void *in, void *out)
 {
-	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
-	struct mlx5_ib_mkey *mkey;
+	struct mlx5_ib_mkey *mkey = &obj->mkey;
 	void *mkc;
 	u8 key;
 
-	mkey = &devx_mr->mmkey;
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	key = MLX5_GET(mkc, mkc, mkey_7_0);
 	mkey->key = mlx5_idx_to_mkey(
 			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
 	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
-	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
 	init_waitqueue_head(&mkey->wait);
 
 	return mlx5r_store_odp_mkey(dev, mkey);
@@ -1381,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
 	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
 	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
 	    xa_erase(&obj->ib_dev->odp_mkeys,
-		     mlx5_base_mkey(obj->devx_mr.mmkey.key)))
+		     mlx5_base_mkey(obj->mkey.key)))
 		/*
 		 * The pagefault_single_data_segment() does commands against
 		 * the mmkey, we must wait for that to stop before freeing the
 		 * mkey, as another allocation could get the same mkey #.
 		 */
-		mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
+		mlx5r_deref_wait_odp_mkey(&obj->mkey);
 
 	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
 		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
drivers/infiniband/hw/mlx5/devx.h

@@ -16,7 +16,7 @@ struct devx_obj {
 	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
 	u32 flags;
 	union {
-		struct mlx5_ib_devx_mr devx_mr;
+		struct mlx5_ib_mkey mkey;
 		struct mlx5_core_dct core_dct;
 		struct mlx5_core_cq core_cq;
 		u32 flow_counter_bulk_size;
drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -628,6 +628,7 @@ enum mlx5_mkey_type {
 struct mlx5_ib_mkey {
 	u32 key;
 	enum mlx5_mkey_type type;
+	unsigned int ndescs;
 	struct wait_queue_head wait;
 	refcount_t usecount;
 };
@@ -672,7 +673,6 @@ struct mlx5_ib_mr {
 	void *descs_alloc;
 	dma_addr_t desc_map;
 	int max_descs;
-	int ndescs;
 	int desc_size;
 	int access_mode;
 
@@ -727,12 +727,6 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
 struct mlx5_ib_mw {
 	struct ib_mw ibmw;
 	struct mlx5_ib_mkey mmkey;
-	int ndescs;
-};
-
-struct mlx5_ib_devx_mr {
-	struct mlx5_ib_mkey mmkey;
-	int ndescs;
 };
 
 struct mlx5_ib_umr_context {
drivers/infiniband/hw/mlx5/mr.c

@@ -2264,9 +2264,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	struct mlx5_ib_mw *mw = to_mmw(ibmw);
+	unsigned int ndescs;
 	u32 *in = NULL;
 	void *mkc;
-	int ndescs;
 	int err;
 	struct mlx5_ib_alloc_mw req = {};
 	struct {
@@ -2311,7 +2311,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 
 	mw->mmkey.type = MLX5_MKEY_MW;
 	ibmw->rkey = mw->mmkey.key;
-	mw->ndescs = ndescs;
+	mw->mmkey.ndescs = ndescs;
 
 	resp.response_length =
 		min(offsetofend(typeof(resp), response_length), udata->outlen);
@@ -2407,7 +2407,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
 	mr->meta_length = 0;
 	if (data_sg_nents == 1) {
 		n++;
-		mr->ndescs = 1;
+		mr->mmkey.ndescs = 1;
 		if (data_sg_offset)
 			sg_offset = *data_sg_offset;
 		mr->data_length = sg_dma_len(data_sg) - sg_offset;
@@ -2460,7 +2460,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
 
-	mr->ndescs = i;
+	mr->mmkey.ndescs = i;
 	mr->data_length = mr->ibmr.length;
 
 	if (meta_sg_nents) {
@@ -2493,11 +2493,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	__be64 *descs;
 
-	if (unlikely(mr->ndescs == mr->max_descs))
+	if (unlikely(mr->mmkey.ndescs == mr->max_descs))
 		return -ENOMEM;
 
 	descs = mr->descs;
-	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+	descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
 
 	return 0;
 }
@@ -2507,11 +2507,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	__be64 *descs;
 
-	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+	if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
 		return -ENOMEM;
 
 	descs = mr->descs;
-	descs[mr->ndescs + mr->meta_ndescs++] =
+	descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
 		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
 
 	return 0;
@@ -2527,7 +2527,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
 	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
 	int n;
 
-	pi_mr->ndescs = 0;
+	pi_mr->mmkey.ndescs = 0;
 	pi_mr->meta_ndescs = 0;
 	pi_mr->meta_length = 0;
 
@@ -2561,7 +2561,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
 	 * metadata offset at the first metadata page
 	 */
 	pi_mr->pi_iova = (iova & page_mask) +
-		       pi_mr->ndescs * ibmr->page_size +
+		       pi_mr->mmkey.ndescs * ibmr->page_size +
 		       (pi_mr->ibmr.iova & ~page_mask);
 	/*
 	 * In order to use one MTT MR for data and metadata, we register
@@ -2592,7 +2592,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
 	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
 	int n;
 
-	pi_mr->ndescs = 0;
+	pi_mr->mmkey.ndescs = 0;
 	pi_mr->meta_ndescs = 0;
 	pi_mr->meta_length = 0;
 
@@ -2627,7 +2627,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
 
 	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
 
-	mr->ndescs = 0;
+	mr->mmkey.ndescs = 0;
 	mr->data_length = 0;
 	mr->data_iova = 0;
 	mr->meta_ndescs = 0;
@@ -2683,7 +2683,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
 	int n;
 
-	mr->ndescs = 0;
+	mr->mmkey.ndescs = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
 				   mr->desc_size * mr->max_descs,
drivers/infiniband/hw/mlx5/odp.c

@@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
 	return mmkey->key == key;
 }
 
-static int get_indirect_num_descs(struct mlx5_ib_mkey *mmkey)
-{
-	struct mlx5_ib_mw *mw;
-	struct mlx5_ib_devx_mr *devx_mr;
-
-	if (mmkey->type == MLX5_MKEY_MW) {
-		mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
-		return mw->ndescs;
-	}
-
-	devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
-			       mmkey);
-	return devx_mr->ndescs;
-}
-
 /*
  * Handle a single data segment in a page-fault WQE or RDMA region.
  *
@@ -836,7 +821,6 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	struct mlx5_klm *pklm;
 	u32 *out = NULL;
 	size_t offset;
-	int ndescs;
 
 	io_virt += *bytes_committed;
 	bcnt -= *bytes_committed;
@@ -885,8 +869,6 @@ next_mr:
 
 	case MLX5_MKEY_MW:
 	case MLX5_MKEY_INDIRECT_DEVX:
-		ndescs = get_indirect_num_descs(mmkey);
-
 		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
 			mlx5_ib_dbg(dev, "indirection level exceeded\n");
 			ret = -EFAULT;
@@ -894,7 +876,7 @@ next_mr:
 		}
 
 		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
-			sizeof(*pklm) * (ndescs - 2);
+			sizeof(*pklm) * (mmkey->ndescs - 2);
 
 		if (outlen > cur_outlen) {
 			kfree(out);
@@ -916,7 +898,7 @@ next_mr:
 		offset = io_virt - MLX5_GET64(query_mkey_out, out,
 					      memory_key_mkey_entry.start_addr);
 
-		for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+		for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
 			if (offset >= be32_to_cpu(pklm->bcount)) {
 				offset -= be32_to_cpu(pklm->bcount);
 				continue;
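The fifteen lines removed at the top of the odp.c hunk above are the payoff. Condensed for illustration (not the verbatim kernel code), the deleted helper had to dispatch on the mkey type with container_of() because each wrapper kept a private count; once ndescs sits on struct mlx5_ib_mkey, every call site reads it directly:

/* Pre-commit: type-specific dispatch, condensed from the removed lines. */
static int get_indirect_num_descs(struct mlx5_ib_mkey *mmkey)
{
	if (mmkey->type == MLX5_MKEY_MW)
		return container_of(mmkey, struct mlx5_ib_mw, mmkey)->ndescs;
	return container_of(mmkey, struct mlx5_ib_devx_mr, mmkey)->ndescs;
}

/* Post-commit: no helper needed anywhere the count is consumed. */
ndescs = mmkey->ndescs;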
drivers/infiniband/hw/mlx5/wr.c

@@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void)
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
 {
-	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+	int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
 
 	memset(umr, 0, sizeof(*umr));
 
@@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
 			     struct mlx5_ib_mr *mr,
 			     u32 key, int access)
 {
-	int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+	int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
 
 	memset(seg, 0, sizeof(*seg));
 
@@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 			     struct mlx5_ib_mr *mr,
 			     struct mlx5_ib_pd *pd)
 {
-	int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+	int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
 
 	dseg->addr = cpu_to_be64(mr->desc_map);
 	dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
@@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
 	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
-	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+	int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
 	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
@@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
 		memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
 		/* No UMR, use local_dma_lkey */
 		pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
-		pa_pi_mr.ndescs = mr->ndescs;
+		pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
 		pa_pi_mr.data_length = mr->data_length;
 		pa_pi_mr.data_iova = mr->data_iova;
 		if (mr->meta_ndescs) {
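As a closing sanity check on the wr.c sizing math (values illustrative, not taken from the commit): with 8 data descriptors, 2 metadata descriptors and a 16-byte descriptor size, set_reg_data_seg() computes a 160-byte inline translation list and pads it to the next 64-byte boundary.

	int bcount = 16 * (8 + 2);	/* mr->desc_size * (ndescs + meta_ndescs) = 160 */
	/* dseg->byte_count = ALIGN(160, 64) = 192 bytes */

Only the source of the count changes in this commit (mr->ndescs becomes mr->mmkey.ndescs); the arithmetic itself is untouched.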