mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-11-01 09:13:37 +00:00 
			
		
		
		
	sbitmap: add helper to clear a batch of tags
sbitmap currently only supports clearing tags one-by-one; add a helper that allows the caller to pass in an array of tags to clear. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									5a72e899ce
								
							
						
					
					
						commit
						1aec5e4a29
					
				
					 2 changed files with 52 additions and 3 deletions
				
			
		| 
						 | 
				
			
			@ -528,6 +528,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
 | 
			
		|||
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 | 
			
		||||
			 unsigned int cpu);
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits in a
 * &struct sbitmap_queue.
 | 
			
		||||
 * @sbq: Bitmap to free from.
 | 
			
		||||
 * @offset: offset for each tag in array
 | 
			
		||||
 * @tags: array of tags
 | 
			
		||||
 * @nr_tags: number of tags in array
 | 
			
		||||
 */
 | 
			
		||||
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
 | 
			
		||||
				int *tags, int nr_tags);
 | 
			
		||||
 | 
			
		||||
static inline int sbq_index_inc(int index)
 | 
			
		||||
{
 | 
			
		||||
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -628,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 | 
			
		|||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 | 
			
		||||
 | 
			
		||||
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
 | 
			
		||||
{
 | 
			
		||||
	if (likely(!sb->round_robin && tag < sb->depth))
 | 
			
		||||
		*per_cpu_ptr(sb->alloc_hint, cpu) = tag;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 * Free a batch of allocated bits from @sbq.  @tags holds @nr_tags tag
 * values; @offset is subtracted from each entry to recover the raw bit
 * number.  Consecutive tags that land in the same bitmap word are
 * accumulated into one mask so the word is cleared with a single
 * atomic_long_andnot() instead of one atomic op per tag.
 * Kerneldoc for this function lives with its declaration in the header.
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
		int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;	/* word currently being batched */
	unsigned long mask = 0;		/* bits to clear in *addr */
	int i;

	/* order prior tag-related stores before the clears below */
	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			/* moved to a new word: flush the accumulated mask */
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	/* flush the final (possibly only) word's mask */
	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	/* pairs with the barrier in waiters; make clears visible first */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	/* seed the alloc hint with the last freed tag on this cpu */
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}
 | 
			
		||||
 | 
			
		||||
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 | 
			
		||||
			 unsigned int cpu)
 | 
			
		||||
{
 | 
			
		||||
| 
						 | 
				
			
			@ -652,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 | 
			
		|||
	 */
 | 
			
		||||
	smp_mb__after_atomic();
 | 
			
		||||
	sbitmap_queue_wake_up(sbq);
 | 
			
		||||
 | 
			
		||||
	if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
 | 
			
		||||
		*per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
 | 
			
		||||
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
	Add table
		
		Reference in a new issue