	dm: improve hash_locks sizing and hash function
Both bufio and bio-prison-v1 use the identical model for splitting their respective locks and rbtrees. Improve dm_num_hash_locks() to distribute across more rbtrees to improve overall performance -- but the maximum number of locks/rbtrees is still 64.

Also factor out a common hash function named dm_hash_locks_index(); the magic numbers used were determined to be best using this program:
https://gist.github.com/jthornber/e05c47daa7b500c56dc339269c5467fc

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent b6279f82eb
commit 363b7fd76c

3 changed files with 17 additions and 4 deletions
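The dm_num_hash_locks() change in the dm.h hunk below simply doubles the lock/rbtree count before applying the 64-lock cap. As a rough illustration only, the following userspace sketch (not kernel code; round_up_pow2() is a hand-rolled stand-in for the kernel's roundup_pow_of_two(), and the CPU counts are arbitrary examples) compares the old and new sizing:

#include <stdio.h>

#define DM_HASH_LOCKS_MAX 64	/* same cap as in dm.h */

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned int round_up_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* old sizing: one lock/rbtree per online CPU, rounded up, capped at 64 */
static unsigned int num_hash_locks_old(unsigned int ncpus)
{
	unsigned int num_locks = round_up_pow2(ncpus);

	return num_locks < DM_HASH_LOCKS_MAX ? num_locks : DM_HASH_LOCKS_MAX;
}

/* new sizing: twice as many, still capped at DM_HASH_LOCKS_MAX */
static unsigned int num_hash_locks_new(unsigned int ncpus)
{
	unsigned int num_locks = round_up_pow2(ncpus) << 1;

	return num_locks < DM_HASH_LOCKS_MAX ? num_locks : DM_HASH_LOCKS_MAX;
}

int main(void)
{
	unsigned int cpus[] = { 1, 2, 4, 6, 16, 32, 64 };	/* example CPU counts */
	unsigned int i;

	for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%2u cpus: %2u -> %2u locks/rbtrees\n", cpus[i],
		       num_hash_locks_old(cpus[i]),
		       num_hash_locks_new(cpus[i]));
	return 0;
}

For example, 6 online CPUs previously gave 8 locks/rbtrees and now give 16, while machines with 64 or more CPUs stay at the DM_HASH_LOCKS_MAX cap of 64.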
				
			
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -117,9 +117,10 @@ static int cmp_keys(struct dm_cell_key *lhs,
 	return 0;
 }
 
-static unsigned lock_nr(struct dm_cell_key *key, unsigned int num_locks)
+static inline unsigned int lock_nr(struct dm_cell_key *key, unsigned int num_locks)
 {
-	return (key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) & (num_locks - 1);
+	return dm_hash_locks_index((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT),
+				   num_locks);
 }
 
 bool dm_cell_key_has_valid_range(struct dm_cell_key *key)
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -398,7 +398,7 @@ struct dm_buffer_cache {
 
 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 {
-	return block & (num_locks - 1);
+	return dm_hash_locks_index(block, num_locks);
 }
 
 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -233,9 +233,21 @@ unsigned int dm_get_reserved_bio_based_ios(void);
 
 static inline unsigned int dm_num_hash_locks(void)
 {
-	unsigned int num_locks = roundup_pow_of_two(num_online_cpus());
+	unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 1;
 
 	return min_t(unsigned int, num_locks, DM_HASH_LOCKS_MAX);
 }
 
+#define DM_HASH_LOCKS_MULT  4294967291ULL
+#define DM_HASH_LOCKS_SHIFT 6
+
+static inline unsigned int dm_hash_locks_index(sector_t block,
+					       unsigned int num_locks)
+{
+	sector_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
+	sector_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;
+
+	return (h1 ^ h2) & (num_locks - 1);
+}
+
 #endif
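The magic numbers in dm_hash_locks_index() were tuned with the gist linked in the commit message; as a quick, non-authoritative way to see how the multiply/shift/xor spreads block numbers across the locks, the helper can be copied out of the dm.h hunk above into a standalone userspace harness. This is only an illustrative sketch: uint64_t stands in for sector_t, and the 2^20-block range and 64-bucket count are arbitrary choices.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's sector_t */

#define DM_HASH_LOCKS_MULT  4294967291ULL
#define DM_HASH_LOCKS_SHIFT 6

/* copied from the dm.h hunk above */
static inline unsigned int dm_hash_locks_index(sector_t block,
					       unsigned int num_locks)
{
	sector_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
	sector_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;

	return (h1 ^ h2) & (num_locks - 1);
}

int main(void)
{
	unsigned int num_locks = 64;	/* DM_HASH_LOCKS_MAX */
	unsigned int counts[64] = { 0 };
	unsigned int i;
	sector_t block;

	/* hash a run of consecutive block numbers and tally the buckets */
	for (block = 0; block < (1 << 20); block++)
		counts[dm_hash_locks_index(block, num_locks)]++;

	for (i = 0; i < num_locks; i++)
		printf("lock %2u: %u blocks\n", i, counts[i]);
	return 0;
}

The point of the mixing is that, unlike the old block & (num_locks - 1) masking, block numbers whose low bits repeat (for example, accesses strided by a multiple of num_locks) no longer all land on the same lock and rbtree.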