Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-10-31 16:54:21 +00:00

	mm/zsmalloc.c: migration can leave pages in ZS_EMPTY indefinitely
In zs_page_migrate() we call putback_zspage() after we have finished
migrating all pages in this zspage.  However, the return value is
ignored.  If a zs_free() races in between zs_page_isolate() and
zs_page_migrate(), freeing the last object in the zspage,
putback_zspage() will leave the page in ZS_EMPTY for potentially an
unbounded amount of time.
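
An illustrative interleaving of the window described above (a sketch only, using just the functions and symbols this patch already talks about, not a captured trace):

/*
 *  CPU A (migration path)                 CPU B
 *  ----------------------                 -----
 *  zs_page_isolate()
 *                                         zs_free()
 *                                           frees the last object, so the
 *                                           zspage is now empty
 *  zs_page_migrate()
 *    ...
 *    putback_zspage()
 *      returns ZS_EMPTY, but the return value is ignored, so
 *      free_work is never scheduled and the zspage sits on the
 *      ZS_EMPTY list until something else disturbs it
 */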
To fix this, we need to do the same thing as zs_page_putback() does:
schedule free_work to occur.
To avoid duplicated code, move the sequence to a new
putback_zspage_deferred() function which both zs_page_migrate() and
zs_page_putback() call.
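
For background (pre-existing machinery, not part of this change): scheduling free_work is sufficient because the pool already carries a deferred-free worker. The sketch below is abridged from mainline mm/zsmalloc.c of this period; the handler name async_free_zspage() is taken from that tree, not from this patch:

/* Wired up once when the pool is created (pre-existing code): */
INIT_WORK(&pool->free_work, async_free_zspage);

/*
 * async_free_zspage() later runs in workqueue context, where no page_lock
 * is held; it walks each size class's ZS_EMPTY group and frees those
 * zspages, so an empty zspage only needs schedule_work(&pool->free_work)
 * to be reclaimed eventually.
 */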
Link: http://lkml.kernel.org/r/20190809181751.219326-1-henryburns@google.com
Fixes: 48b4800a1c ("zsmalloc: page migration support")
Signed-off-by: Henry Burns <henryburns@google.com>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Henry Burns <henrywolfeburns@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Jonathan Adams <jwadams@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
This commit is contained in:
	parent f7da677bc6
	commit 1a87aa0359

1 changed file with 15 additions and 4 deletions
mm/zsmalloc.c
@@ -1862,6 +1862,18 @@ static void dec_zspage_isolation(struct zspage *zspage)
 	zspage->isolated--;
 }
 
+static void putback_zspage_deferred(struct zs_pool *pool,
+				    struct size_class *class,
+				    struct zspage *zspage)
+{
+	enum fullness_group fg;
+
+	fg = putback_zspage(class, zspage);
+	if (fg == ZS_EMPTY)
+		schedule_work(&pool->free_work);
+
+}
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 				struct page *newpage, struct page *oldpage)
 {
@@ -2031,7 +2043,7 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
 	 * the list if @page is final isolated subpage in the zspage.
 	 */
 	if (!is_zspage_isolated(zspage))
-		putback_zspage(class, zspage);
+		putback_zspage_deferred(pool, class, zspage);
 
 	reset_page(page);
 	put_page(page);
@@ -2077,14 +2089,13 @@ static void zs_page_putback(struct page *page)
 	spin_lock(&class->lock);
 	dec_zspage_isolation(zspage);
 	if (!is_zspage_isolated(zspage)) {
-		fg = putback_zspage(class, zspage);
 		/*
 		 * Due to page_lock, we cannot free zspage immediately
 		 * so let's defer.
 		 */
-		if (fg == ZS_EMPTY)
-			schedule_work(&pool->free_work);
+		putback_zspage_deferred(pool, class, zspage);
 	}
+
 	spin_unlock(&class->lock);
 }
 