mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-11-17 12:14:43 +00:00
Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
Longer term testing shows this patch didn't play well with the MR cache and
caused call traces during remove_mkeys().
This reverts commit bb7e22a8ab.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
7422edce73
commit
ccffa54548
1 changed file with 3 additions and 16 deletions
|
|
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
|
||||||
|
|
||||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||||
/* Wait until all page fault handlers using the mr complete. */
|
/* Wait until all page fault handlers using the mr complete. */
|
||||||
if (mr->umem && mr->umem->is_odp)
|
synchronize_srcu(&dev->mr_srcu);
|
||||||
synchronize_srcu(&dev->mr_srcu);
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
|
|
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
|
||||||
{
|
{
|
||||||
struct mlx5_mr_cache *cache = &dev->cache;
|
struct mlx5_mr_cache *cache = &dev->cache;
|
||||||
struct mlx5_cache_ent *ent = &cache->ent[c];
|
struct mlx5_cache_ent *ent = &cache->ent[c];
|
||||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
|
||||||
bool odp_mkey_exist = false;
|
|
||||||
#endif
|
|
||||||
struct mlx5_ib_mr *tmp_mr;
|
struct mlx5_ib_mr *tmp_mr;
|
||||||
struct mlx5_ib_mr *mr;
|
struct mlx5_ib_mr *mr;
|
||||||
LIST_HEAD(del_list);
|
LIST_HEAD(del_list);
|
||||||
|
|
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
|
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
|
||||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
|
||||||
if (mr->umem && mr->umem->is_odp)
|
|
||||||
odp_mkey_exist = true;
|
|
||||||
#endif
|
|
||||||
list_move(&mr->list, &del_list);
|
list_move(&mr->list, &del_list);
|
||||||
ent->cur--;
|
ent->cur--;
|
||||||
ent->size--;
|
ent->size--;
|
||||||
|
|
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||||
if (odp_mkey_exist)
|
synchronize_srcu(&dev->mr_srcu);
|
||||||
synchronize_srcu(&dev->mr_srcu);
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
|
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
|
||||||
|
|
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
|
||||||
{
|
{
|
||||||
struct mlx5_mr_cache *cache = &dev->cache;
|
struct mlx5_mr_cache *cache = &dev->cache;
|
||||||
struct mlx5_cache_ent *ent = &cache->ent[c];
|
struct mlx5_cache_ent *ent = &cache->ent[c];
|
||||||
bool odp_mkey_exist = false;
|
|
||||||
struct mlx5_ib_mr *tmp_mr;
|
struct mlx5_ib_mr *tmp_mr;
|
||||||
struct mlx5_ib_mr *mr;
|
struct mlx5_ib_mr *mr;
|
||||||
LIST_HEAD(del_list);
|
LIST_HEAD(del_list);
|
||||||
|
|
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
|
mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
|
||||||
if (mr->umem && mr->umem->is_odp)
|
|
||||||
odp_mkey_exist = true;
|
|
||||||
list_move(&mr->list, &del_list);
|
list_move(&mr->list, &del_list);
|
||||||
ent->cur--;
|
ent->cur--;
|
||||||
ent->size--;
|
ent->size--;
|
||||||
|
|
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
|
||||||
if (odp_mkey_exist)
|
synchronize_srcu(&dev->mr_srcu);
|
||||||
synchronize_srcu(&dev->mr_srcu);
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
|
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue