Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-21 06:50:25 +00:00)
RDMA/hns: Support DSCP
Add support for DSCP configuration. For DSCP, get the dscp-prio mapping via the hns3 NIC driver API .get_dscp_prio() and fill the SL (in the WQE for UD, or in the QPC for RC) with the priority value. The prio-tc mapping is configured to HW by the hns3 NIC driver; HW selects the corresponding TC according to the SL and the prio-tc mapping.

Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240315093551.1650088-1-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
This commit is contained in:
parent 2ca7e93bc9
commit ee20cc17e9

5 changed files with 115 additions and 29 deletions
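To make the flow concrete before reading the diff, the sketch below strings together the pieces the patch adds: the new .get_dscp hw op (backed by the hns3 .get_dscp_prio() callback), the DSCP-mode check, and the shared SL range check. This is a condensed, hypothetical helper (dscp_select_sl() is not a name from the patch) with simplified error handling; the exact logic is in the hns_roce_set_sl() and hns_roce_create_ah() hunks below.

/*
 * Condensed sketch of the SL selection this patch introduces (hypothetical
 * helper; the real code lives in hns_roce_set_sl() / hns_roce_create_ah()).
 */
static int dscp_select_sl(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			  const struct rdma_ah_attr *ah_attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	u8 dscp = get_tclass(grh);	/* the traffic class carries the DSCP value */
	int ret;

	/* Query the dscp->prio mapping from the hns3 NIC driver (.get_dscp_prio). */
	ret = hr_dev->hw->get_dscp(hr_dev, dscp, &hr_qp->tc_mode, &hr_qp->priority);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/*
	 * In DSCP mapping mode (RoCEv2 only) the mapped priority becomes the SL;
	 * otherwise the SL requested in the AH attributes is kept.
	 */
	if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		hr_qp->sl = hr_qp->priority;
	else
		hr_qp->sl = rdma_ah_get_sl(ah_attr);

	/* HW later maps SL -> prio -> TC via the prio-tc table configured by hns3. */
	return check_sl_valid(hr_dev, hr_qp->sl) ? 0 : -EINVAL;
}

Userspace learns the chosen values through the new tc_mode/priority fields in hns_roce_ib_create_ah_resp and hns_roce_ib_modify_qp_resp (see the hns-abi.h hunks at the end of the diff).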
drivers/infiniband/hw/hns/hns_roce_ah.c

@@ -59,8 +59,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
 	struct hns_roce_ib_create_ah_resp resp = {};
 	struct hns_roce_ah *ah = to_hr_ah(ibah);
-	int ret = 0;
-	u32 max_sl;
+	u8 tclass = get_tclass(grh);
+	u8 priority = 0;
+	u8 tc_mode = 0;
+	int ret;
 
 	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
 		return -EOPNOTSUPP;
@@ -74,16 +76,23 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 	ah->av.hop_limit = grh->hop_limit;
 	ah->av.flowlabel = grh->flow_label;
 	ah->av.udp_sport = get_ah_udp_sport(ah_attr);
-	ah->av.tclass = get_tclass(grh);
-
-	ah->av.sl = rdma_ah_get_sl(ah_attr);
-	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
-	if (unlikely(ah->av.sl > max_sl)) {
-		ibdev_err_ratelimited(&hr_dev->ib_dev,
-				      "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
-				      ah->av.sl, max_sl);
+	ah->av.tclass = tclass;
+
+	ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);
+	if (ret == -EOPNOTSUPP)
+		ret = 0;
+
+	if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+		return ret;
+
+	if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+		ah->av.sl = priority;
+	else
+		ah->av.sl = rdma_ah_get_sl(ah_attr);
+
+	if (!check_sl_valid(hr_dev, ah->av.sl))
 		return -EINVAL;
-	}
 
 	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
 	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
@@ -99,6 +108,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 	}
 
 	if (udata) {
+		resp.priority = ah->av.sl;
+		resp.tc_mode = tc_mode;
 		memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
 		ret = ib_copy_to_udata(udata, &resp,
 				       min(udata->outlen, sizeof(resp)));
drivers/infiniband/hw/hns/hns_roce_device.h

@@ -645,6 +645,8 @@ struct hns_roce_qp {
 	struct hns_user_mmap_entry *dwqe_mmap_entry;
 	u32 config;
 	enum hns_roce_cong_type cong_type;
+	u8 tc_mode;
+	u8 priority;
 };
 
 struct hns_roce_ib_iboe {
@@ -950,6 +952,8 @@ struct hns_roce_hw {
 	int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
 	int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
 				u64 *stats, u32 port, int *hw_counters);
+	int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
+			u8 *tc_mode, u8 *priority);
 	const struct ib_device_ops *hns_roce_dev_ops;
 	const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
@@ -1292,4 +1296,6 @@ struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
 				size_t length,
 				enum hns_roce_mmap_type mmap_type);
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+
 #endif /* _HNS_ROCE_DEVICE_H */
drivers/infiniband/hw/hns/hns_roce_hw_v2.c

@@ -443,10 +443,6 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
 	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
 	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
 	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
-
-	if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
-		return -EINVAL;
-
 	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
 
 	ud_sq_wqe->sgid_index = ah->av.gid_index;
@@ -4828,6 +4824,69 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	return 0;
 }
 
+static int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
+				   u8 *tc_mode, u8 *priority)
+{
+	struct hns_roce_v2_priv *priv = hr_dev->priv;
+	struct hnae3_handle *handle = priv->handle;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+	if (!ops->get_dscp_prio)
+		return -EOPNOTSUPP;
+
+	return ops->get_dscp_prio(handle, dscp, tc_mode, priority);
+}
+
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl)
+{
+	u32 max_sl;
+
+	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
+	if (unlikely(sl > max_sl)) {
+		ibdev_err_ratelimited(&hr_dev->ib_dev,
+				      "failed to set SL(%u). Shouldn't be larger than %u.\n",
+				      sl, max_sl);
+		return false;
+	}
+
+	return true;
+}
+
+static int hns_roce_set_sl(struct ib_qp *ibqp,
+			   const struct ib_qp_attr *attr,
+			   struct hns_roce_v2_qp_context *context,
+			   struct hns_roce_v2_qp_context *qpc_mask)
+{
+	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int ret;
+
+	ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
+				      &hr_qp->tc_mode, &hr_qp->priority);
+	if (ret && ret != -EOPNOTSUPP &&
+	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+		ibdev_err_ratelimited(ibdev,
+				      "failed to get dscp, ret = %d.\n", ret);
+		return ret;
+	}
+
+	if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+		hr_qp->sl = hr_qp->priority;
+	else
+		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+	if (!check_sl_valid(hr_dev, hr_qp->sl))
+		return -EINVAL;
+
+	hr_reg_write(context, QPC_SL, hr_qp->sl);
+	hr_reg_clear(qpc_mask, QPC_SL);
+
+	return 0;
+}
+
 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 				const struct ib_qp_attr *attr,
 				int attr_mask,
@@ -4843,25 +4902,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 	int is_roce_protocol;
 	u16 vlan_id = 0xffff;
 	bool is_udp = false;
-	u32 max_sl;
 	u8 ib_port;
 	u8 hr_port;
 	int ret;
 
-	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
-	if (unlikely(sl > max_sl)) {
-		ibdev_err_ratelimited(ibdev,
-				      "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
-				      sl, max_sl);
-		return -EINVAL;
-	}
-
 	/*
 	 * If free_mr_en of qp is set, it means that this qp comes from
 	 * free mr. This qp will perform the loopback operation.
 	 * In the loopback scenario, only sl needs to be set.
 	 */
 	if (hr_qp->free_mr_en) {
+		if (!check_sl_valid(hr_dev, sl))
+			return -EINVAL;
 		hr_reg_write(context, QPC_SL, sl);
 		hr_reg_clear(qpc_mask, QPC_SL);
 		hr_qp->sl = sl;
@@ -4931,11 +4983,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
 
-	hr_qp->sl = sl;
-	hr_reg_write(context, QPC_SL, hr_qp->sl);
-	hr_reg_clear(qpc_mask, QPC_SL);
-
-	return 0;
+	return hns_roce_set_sl(ibqp, attr, context, qpc_mask);
 }
 
 static bool check_qp_state(enum ib_qp_state cur_state,
@@ -6735,6 +6783,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
 	.query_srqc = hns_roce_v2_query_srqc,
 	.query_sccc = hns_roce_v2_query_sccc,
 	.query_hw_counter = hns_roce_hw_v2_query_counter,
+	.get_dscp = hns_roce_hw_v2_get_dscp,
 	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
 	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
 };
drivers/infiniband/hw/hns/hns_roce_qp.c

@@ -1386,6 +1386,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_ib_modify_qp_resp resp = {};
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int ret = -EINVAL;
@@ -1427,6 +1428,18 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
 				    new_state, udata);
+	if (ret)
+		goto out;
+
+	if (udata && udata->outlen) {
+		resp.tc_mode = hr_qp->tc_mode;
+		resp.priority = hr_qp->sl;
+		ret = ib_copy_to_udata(udata, &resp,
+				       min(udata->outlen, sizeof(resp)));
+		if (ret)
+			ibdev_err_ratelimited(&hr_dev->ib_dev,
+					      "failed to copy modify qp resp.\n");
+	}
 
 out:
 	mutex_unlock(&hr_qp->mutex);
include/uapi/rdma/hns-abi.h

@@ -109,6 +109,12 @@ struct hns_roce_ib_create_qp_resp {
 	__aligned_u64 dwqe_mmap_key;
 };
 
+struct hns_roce_ib_modify_qp_resp {
+	__u8 tc_mode;
+	__u8 priority;
+	__u8 reserved[6];
+};
+
 enum {
 	HNS_ROCE_EXSGE_FLAGS = 1 << 0,
 	HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
@@ -143,7 +149,8 @@ struct hns_roce_ib_alloc_pd_resp {
 
 struct hns_roce_ib_create_ah_resp {
 	__u8 dmac[6];
-	__u8 reserved[2];
+	__u8 priority;
+	__u8 tc_mode;
 };
 
 #endif /* HNS_ABI_USER_H */