Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-11-01 09:13:37 +00:00)
	mm/hugetlb: remove unnecessary holding of hugetlb_lock
In isolate_or_dissolve_huge_folio(), the hugetlb_lock is taken only to
obtain the correct hstate, which is then passed to
alloc_and_dissolve_hugetlb_folio().  Since
alloc_and_dissolve_hugetlb_folio() acquires the hugetlb_lock itself, it
can look up the hstate on its own, and isolate_or_dissolve_huge_folio()
no longer needs to take the lock at all.  In addition, we keep an
(unlocked) folio_test_hugetlb() check in
isolate_or_dissolve_huge_folio(), so the normal path is not disrupted by
vainly holding the hugetlb_lock.

replace_free_hugepage_folios() has the same issue, and we address it as
well.

Addresses a possible performance problem which was added by the hotfix
113ed54ad2 ("mm/hugetlb: fix kernel NULL pointer dereference when
replacing free hugetlb folios").

Link: https://lkml.kernel.org/r/1748317010-16272-1-git-send-email-yangge1116@126.com
Fixes: 113ed54ad2 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
Signed-off-by: Ge Yang <yangge1116@126.com>
Suggested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
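The locking shape this patch moves to is a common one: do a cheap, racy
check without the lock to filter out the common case, and let the slow
path take the lock and re-validate before acting.  Below is a minimal
userspace C sketch of that shape; it is an illustration only, not
kernel code, and every name in it (object_lock, obj_is_tracked(),
fix_up_object()) is a hypothetical stand-in, with a pthread mutex
standing in for the irq-disabling hugetlb_lock spinlock.

/*
 * Minimal userspace sketch of the pattern, NOT kernel code: all names
 * here are hypothetical stand-ins for the hugetlb ones.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
	bool tracked;	/* plays the role of folio_test_hugetlb() */
	int state;	/* plays the role of the hstate */
};

/* Racy peek without the lock: cheap, but may be stale immediately. */
static bool obj_is_tracked(const struct object *obj)
{
	return __atomic_load_n(&obj->tracked, __ATOMIC_RELAXED);
}

/*
 * The slow path takes the lock itself and re-checks before reading any
 * state, so a stale answer from the unlocked peek is harmless: we
 * either bail out here, or read the state under the lock.
 */
static int fix_up_object(struct object *obj)
{
	int state;

	pthread_mutex_lock(&object_lock);
	if (!obj->tracked) {		/* authoritative re-check */
		pthread_mutex_unlock(&object_lock);
		return 0;		/* raced: treat as already done */
	}
	state = obj->state;		/* lookup moved inside, like folio_hstate() */
	pthread_mutex_unlock(&object_lock);

	printf("fixing object in state %d\n", state);
	return 0;
}

int main(void)
{
	struct object obj = { .tracked = true, .state = 2 };

	/* Fast path: skip non-candidates without ever touching the lock. */
	if (!obj_is_tracked(&obj))
		return 0;

	return fix_up_object(&obj);
}

The unlocked check is safe precisely because the slow path re-validates
under the lock, which mirrors how alloc_and_dissolve_hugetlb_folio()
re-checks folio_test_hugetlb() under hugetlb_lock in the diff below.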
This commit is contained in:

parent 3746351e87
commit 344ef45b03

1 changed file with 17 additions and 37 deletions:

 mm/hugetlb.c | 54 +++++++++++++++++-------------------------------------
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
- * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-			struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+			struct list_head *list)
 {
-	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+	gfp_t gfp_mask;
+	struct hstate *h;
 	int nid = folio_nid(old_folio);
 	struct folio *new_folio = NULL;
 	int ret = 0;
 
 retry:
 	/*
 	 * The old_folio might have been dissolved from under our feet, so make sure
 	 * to carefully check the state under the lock.
 	 */
 	spin_lock_irq(&hugetlb_lock);
 	if (!folio_test_hugetlb(old_folio)) {
 		/*
@@ -2829,8 +2833,10 @@ retry:
 		cond_resched();
 		goto retry;
 	} else {
+		h = folio_hstate(old_folio);
 		if (!new_folio) {
 			spin_unlock_irq(&hugetlb_lock);
+			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
 							      NULL, NULL);
 			if (!new_folio)
@@ -2874,35 +2880,24 @@ free_new:
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-	struct hstate *h;
 	int ret = -EBUSY;
 
-	/*
-	 * The page might have been dissolved from under our feet, so make sure
-	 * to carefully check the state under the lock.
-	 * Return success when racing as if we dissolved the page ourselves.
-	 */
-	spin_lock_irq(&hugetlb_lock);
-	if (folio_test_hugetlb(folio)) {
-		h = folio_hstate(folio);
-	} else {
-		spin_unlock_irq(&hugetlb_lock);
+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!folio_test_hugetlb(folio))
 		return 0;
-	}
-	spin_unlock_irq(&hugetlb_lock);
 
 	/*
 	 * Fence off gigantic pages as there is a cyclic dependency between
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (hstate_is_gigantic(h))
+	if (folio_order(folio) > MAX_PAGE_ORDER)
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
 		ret = 0;
 	else if (!folio_ref_count(folio))
-		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
 	return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct hstate *h;
 	struct folio *folio;
 	int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 	while (start_pfn < end_pfn) {
 		folio = pfn_folio(start_pfn);
 
-		/*
-		 * The folio might have been dissolved from under our feet, so make sure
-		 * to carefully check the state under the lock.
-		 */
-		spin_lock_irq(&hugetlb_lock);
-		if (folio_test_hugetlb(folio)) {
-			h = folio_hstate(folio);
-		} else {
-			spin_unlock_irq(&hugetlb_lock);
-			start_pfn++;
-			continue;
-		}
-		spin_unlock_irq(&hugetlb_lock);
-
-		if (!folio_ref_count(folio)) {
-			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-							       &isolate_list);
+		/* Not to disrupt normal path by vainly holding hugetlb_lock */
+		if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+			ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
 			if (ret)
 				break;
 
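One detail worth noting in the hunks above: with no hstate looked up at
the early fence any more, the patch open-codes the gigantic-page test as
folio_order(folio) > MAX_PAGE_ORDER.  That is equivalent because a
hugetlb folio's order is its hstate's order, and (paraphrasing
include/linux/hugetlb.h) hstate_is_gigantic() is just:

/* Paraphrased from include/linux/hugetlb.h: a gigantic hstate is one
 * whose order is beyond what the buddy allocator can manage. */
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_PAGE_ORDER;
}

So the predicate is unchanged; it is merely computed from the folio
itself instead of from the no-longer-available hstate.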