Merge tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
 - Improve scalability of DM's device hash by switching to rbtree
 - Extend DM ioctl's DM_LIST_DEVICES_CMD handling to include UUID and
   allow filtering based on name or UUID prefix.
 - Various small fixes for typos, warnings, an unused function, or
   needlessly exported interfaces.
 - Remove needless request_queue NULL pointer checks in DM thin and
   cache targets.
 - Remove unnecessary loop in DM core's __split_and_process_bio().
 - Remove DM core's dm_vcalloc() and just use kvcalloc or kvmalloc_array
   instead (depending on whether zeroing is useful).
 - Fix request-based DM's double free of blk_mq_tag_set in device remove
   after table load fails.
 - Improve DM persistent data performance on non-x86 by fixing packed
   structs to have a stated alignment. Also remove needless extra work
   from redundant calls to sm_disk_get_nr_free() and a paranoid BUG_ON()
   that caused duplicate checksum calculation.
 - Fix missing goto in DM integrity's bitmap_flush_interval error
   handling.
 - Add "reset_recalculate" feature flag to DM integrity.
 - Improve DM integrity by leveraging discard support to avoid needless
   re-writing of metadata and also use discard support to improve hash
   recalculation.
 - Fix race with DM raid target's reshape and MD raid4/5/6 resync that
   resulted in inconsistent reshape state during table reloads.
 - Update DM raid target to remove unnecessary discard limits for raid0
   and raid10 now that MD has optimized discard handling for both raid
   levels.
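
The rbtree switch in the first item above replaces a fixed 64-bucket hash (whose per-bucket lists grow linearly with device count) with trees ordered by strcmp() on name and uuid, making lookups O(log n) and, as a side benefit, yielding devices in sorted order. A minimal userspace sketch of the same strcmp-ordered insert/lookup scheme, using a plain binary search tree in place of the kernel's rb_node machinery (device names here are illustrative, not from the patch):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy analogue of struct hash_cell, keyed by device name. */
struct cell {
	char *name;
	struct cell *left, *right;
};

/* Mirrors __get_name_cell(): walk left/right by strcmp() order. */
static struct cell *lookup(struct cell *root, const char *name)
{
	while (root) {
		int c = strcmp(root->name, name);
		if (!c)
			return root;
		root = c >= 0 ? root->left : root->right;
	}
	return NULL;
}

/* Mirrors __link_name(): descend to a NULL child slot and attach. */
static void insert(struct cell **root, struct cell *new_cell)
{
	while (*root) {
		int c = strcmp((*root)->name, new_cell->name);
		root = c >= 0 ? &(*root)->left : &(*root)->right;
	}
	*root = new_cell;
}

int main(void)
{
	const char *names[] = { "vg0-root", "vg0-swap", "crypt-home" };
	struct cell *root = NULL;

	for (size_t i = 0; i < 3; i++) {
		struct cell *c = calloc(1, sizeof(*c));
		c->name = strdup(names[i]);
		insert(&root, c);
	}
	printf("found: %s\n", lookup(root, "vg0-swap")->name);
	return 0;
}

The kernel version additionally rebalances on insert via rb_insert_color(), which is what keeps the worst case logarithmic where this unbalanced sketch can degrade.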
* tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (26 commits)
  dm raid: remove unnecessary discard limits for raid0 and raid10
  dm rq: fix double free of blk_mq_tag_set in dev remove after table load fails
  dm integrity: use discard support when recalculating
  dm integrity: increase RECALC_SECTORS to improve recalculate speed
  dm integrity: don't re-write metadata if discarding same blocks
  dm raid: fix inconclusive reshape layout on fast raid4/5/6 table reload sequences
  dm raid: fix fall-through warning in rs_check_takeover() for Clang
  dm clone metadata: remove unused function
  dm integrity: fix missing goto in bitmap_flush_interval error handling
  dm: replace dm_vcalloc()
  dm space map common: fix division bug in sm_ll_find_free_block()
  dm persistent data: packed struct should have an aligned() attribute too
  dm btree spine: remove paranoid node_check call in node_prep_for_write()
  dm space map disk: remove redundant calls to sm_disk_get_nr_free()
  dm integrity: add the "reset_recalculate" feature flag
  dm persistent data: remove unused return from exit_shadow_spine()
  dm cache: remove needless request_queue NULL pointer checks
  dm thin: remove needless request_queue NULL pointer check
  dm: unexport dm_{get,put}_table_device
  dm ebs: fix a few typos
  ...
			
			
commit 7af81cd0c4

21 changed files with 371 additions and 270 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev)
 {
 	struct request_queue *q = bdev_get_queue(origin_bdev);
 
-	return q && blk_queue_discard(q);
+	return blk_queue_discard(q);
 }
 
 /*
diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
 	return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
 }
 
-static inline int superblock_write_lock(struct dm_clone_metadata *cmd,
-					struct dm_block **sblock)
-{
-	return dm_bm_write_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
-}
-
 static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
 					     struct dm_block **sblock)
 {
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
@@ -28,7 +28,7 @@ struct ebs_c {
 	spinlock_t lock;		/* Guard bios input list above. */
 	sector_t start;			/* <start> table line argument, see ebs_ctr below. */
 	unsigned int e_bs;		/* Emulated block size in sectors exposed to upper layer. */
-	unsigned int u_bs;		/* Underlying block size in sectors retrievd from/set on lower layer device. */
+	unsigned int u_bs;		/* Underlying block size in sectors retrieved from/set on lower layer device. */
 	unsigned char block_shift;	/* bitshift sectors -> blocks used in dm-bufio API. */
 	bool u_bs_set:1;		/* Flag to indicate underlying block size is set on table line. */
 };
@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs)
 	return sector & (bs - 1);
 }
 
-/* Return number of blocks for a bio, accounting for misalignement of start and end sectors. */
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
 static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
 {
 	sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
 	dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
 }
 
-/* Worker funtion to process incoming bios. */
+/* Worker function to process incoming bios. */
 static void __ebs_process_bios(struct work_struct *ws)
 {
	int r;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
@@ -35,7 +35,7 @@
 #define MIN_LOG2_INTERLEAVE_SECTORS	3
 #define MAX_LOG2_INTERLEAVE_SECTORS	31
 #define METADATA_WORKQUEUE_MAX_ACTIVE	16
-#define RECALC_SECTORS			8192
+#define RECALC_SECTORS			32768
 #define RECALC_WRITE_SUPER		16
 #define BITMAP_BLOCK_SIZE		4096	/* don't change it */
 #define BITMAP_FLUSH_INTERVAL		(10 * HZ)
@@ -262,6 +262,7 @@ struct dm_integrity_c {
 	bool journal_uptodate;
 	bool just_formatted;
 	bool recalculate_flag;
+	bool reset_recalculate_flag;
 	bool discard;
 	bool fix_padding;
 	bool fix_hmac;
@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
 		if (op == TAG_READ) {
 			memcpy(tag, dp, to_copy);
 		} else if (op == TAG_WRITE) {
-			memcpy(dp, tag, to_copy);
-			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+			if (memcmp(dp, tag, to_copy)) {
+				memcpy(dp, tag, to_copy);
+				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
+			}
 		} else {
 			/* e.g.: op == TAG_CMP */
 
@@ -2686,6 +2689,7 @@ next_chunk:
 	if (unlikely(dm_integrity_failed(ic)))
 		goto err;
 
+	if (!ic->discard) {
 		io_req.bi_op = REQ_OP_READ;
 		io_req.bi_op_flags = 0;
 		io_req.mem.type = DM_IO_VMA;
@@ -2707,6 +2711,9 @@ next_chunk:
 			integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
 			t += ic->tag_size;
 		}
+	} else {
+		t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+	}
 
 	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
 
@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti)
 		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
 				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
 		if (ic->mode == 'B') {
-			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+			    !ic->reset_recalculate_flag) {
 				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
 				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
 				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti)
 			}
 		} else {
 			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
-			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
+			    ic->reset_recalculate_flag) {
 				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
 				ic->sb->recalc_sector = cpu_to_le64(0);
 			}
@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti)
 			dm_integrity_io_error(ic, "writing superblock", r);
 	} else {
 		replay_journal(ic);
+		if (ic->reset_recalculate_flag) {
+			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+			ic->sb->recalc_sector = cpu_to_le64(0);
+		}
 		if (ic->mode == 'B') {
 			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
 			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
 		arg_count += !!ic->meta_dev;
 		arg_count += ic->sectors_per_block != 1;
 		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+		arg_count += ic->reset_recalculate_flag;
 		arg_count += ic->discard;
 		arg_count += ic->mode == 'J';
 		arg_count += ic->mode == 'J';
@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
 			DMEMIT(" recalculate");
+		if (ic->reset_recalculate_flag)
+			DMEMIT(" reset_recalculate");
 		if (ic->discard)
 			DMEMIT(" allow_discards");
 		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	unsigned extra_args;
 	struct dm_arg_set as;
 	static const struct dm_arg _args[] = {
-		{0, 17, "Invalid number of feature args"},
+		{0, 18, "Invalid number of feature args"},
 	};
 	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
 	bool should_write_sb;
@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
 				r = -EINVAL;
 				ti->error = "Invalid bitmap_flush_interval argument";
+				goto bad;
 			}
 			ic->bitmap_flush_interval = msecs_to_jiffies(val);
 		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 				goto bad;
 		} else if (!strcmp(opt_string, "recalculate")) {
 			ic->recalculate_flag = true;
+		} else if (!strcmp(opt_string, "reset_recalculate")) {
+			ic->recalculate_flag = true;
+			ic->reset_recalculate_flag = true;
 		} else if (!strcmp(opt_string, "allow_discards")) {
 			ic->discard = true;
 		} else if (!strcmp(opt_string, "fix_padding")) {
@@ -4348,12 +4368,14 @@ try_smaller_buffer:
 			goto bad;
 		}
 		INIT_WORK(&ic->recalc_work, integrity_recalc);
+		if (!ic->discard) {
 			ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
 			if (!ic->recalc_buffer) {
 				ti->error = "Cannot allocate buffer for recalculating";
 				r = -ENOMEM;
 				goto bad;
 			}
+		}
 		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
 						 ic->tag_size, GFP_KERNEL);
 		if (!ic->recalc_tags) {
@@ -4361,6 +4383,9 @@ try_smaller_buffer:
 			r = -ENOMEM;
 			goto bad;
 		}
+		if (ic->discard)
+			memset(ic->recalc_tags, DISCARD_FILLER,
+			       (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
 	} else {
 		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
 			ti->error = "Recalculate can only be specified with internal_hash";
@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
 
 static struct target_type integrity_target = {
 	.name			= "integrity",
-	.version		= {1, 7, 0},
+	.version		= {1, 9, 0},
 	.module			= THIS_MODULE,
 	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
 	.ctr			= dm_integrity_ctr,
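
One detail of the dm-integrity changes above is worth calling out: dm_integrity_rw_tag() now compares the incoming tag with the bytes already present and only dirties the metadata buffer on an actual change, which is what stops re-writing identical metadata when the same blocks are discarded repeatedly. A small self-contained sketch of that compare-before-dirty pattern (buffer_mark_dirty() is a stand-in for dm_bufio_mark_partial_buffer_dirty(); 0xf6 mirrors the DISCARD_FILLER byte the recalc tags are pre-filled with):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool dirty; /* stand-in for dm-bufio's per-buffer dirty state */

static void buffer_mark_dirty(void)
{
	dirty = true; /* in dm-bufio this schedules a metadata writeback */
}

/* Copy tag bytes into the buffer only when they actually differ. */
static void write_tag(unsigned char *dp, const unsigned char *tag, size_t n)
{
	if (memcmp(dp, tag, n)) {
		memcpy(dp, tag, n);
		buffer_mark_dirty();
	}
}

int main(void)
{
	unsigned char buf[4] = { 0xf6, 0xf6, 0xf6, 0xf6 };
	unsigned char tag[4] = { 0xf6, 0xf6, 0xf6, 0xf6 };

	write_tag(buf, tag, sizeof(buf)); /* identical: no writeback */
	printf("dirty after identical write: %d\n", dirty);

	tag[0] = 0x00;
	write_tag(buf, tag, sizeof(buf)); /* changed: buffer dirtied */
	printf("dirty after real change: %d\n", dirty);
	return 0;
}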
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
 #include <linux/compat.h>
@@ -36,8 +37,10 @@ struct dm_file {
  * name or uuid.
  *---------------------------------------------------------------*/
 struct hash_cell {
-	struct list_head name_list;
-	struct list_head uuid_list;
+	struct rb_node name_node;
+	struct rb_node uuid_node;
+	bool name_set;
+	bool uuid_set;
 
 	char *name;
 	char *uuid;
@@ -53,10 +56,8 @@ struct vers_iter {
 };
 
 
-#define NUM_BUCKETS 64
-#define MASK_BUCKETS (NUM_BUCKETS - 1)
-static struct list_head _name_buckets[NUM_BUCKETS];
-static struct list_head _uuid_buckets[NUM_BUCKETS];
+static struct rb_root name_rb_tree = RB_ROOT;
+static struct rb_root uuid_rb_tree = RB_ROOT;
 
 static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
 
@@ -70,73 +71,110 @@ static DECLARE_RWSEM(_hash_lock);
  */
 static DEFINE_MUTEX(dm_hash_cells_mutex);
 
-static void init_buckets(struct list_head *buckets)
-{
-	unsigned int i;
-
-	for (i = 0; i < NUM_BUCKETS; i++)
-		INIT_LIST_HEAD(buckets + i);
-}
-
-static int dm_hash_init(void)
-{
-	init_buckets(_name_buckets);
-	init_buckets(_uuid_buckets);
-	return 0;
-}
-
 static void dm_hash_exit(void)
 {
 	dm_hash_remove_all(false, false, false);
 }
 
 /*-----------------------------------------------------------------
- * Hash function:
- * We're not really concerned with the str hash function being
- * fast since it's only used by the ioctl interface.
- *---------------------------------------------------------------*/
-static unsigned int hash_str(const char *str)
-{
-	const unsigned int hash_mult = 2654435387U;
-	unsigned int h = 0;
-
-	while (*str)
-		h = (h + (unsigned int) *str++) * hash_mult;
-
-	return h & MASK_BUCKETS;
-}
-
-/*-----------------------------------------------------------------
  * Code for looking up a device by name
 *---------------------------------------------------------------*/
 static struct hash_cell *__get_name_cell(const char *str)
 {
-	struct hash_cell *hc;
-	unsigned int h = hash_str(str);
+	struct rb_node *n = name_rb_tree.rb_node;
 
-	list_for_each_entry (hc, _name_buckets + h, name_list)
-		if (!strcmp(hc->name, str)) {
+	while (n) {
+		struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
+		int c = strcmp(hc->name, str);
+		if (!c) {
 			dm_get(hc->md);
 			return hc;
 		}
+		n = c >= 0 ? n->rb_left : n->rb_right;
+	}
 
 	return NULL;
 }
 
 static struct hash_cell *__get_uuid_cell(const char *str)
 {
-	struct hash_cell *hc;
-	unsigned int h = hash_str(str);
+	struct rb_node *n = uuid_rb_tree.rb_node;
 
-	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
-		if (!strcmp(hc->uuid, str)) {
+	while (n) {
+		struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
+		int c = strcmp(hc->uuid, str);
+		if (!c) {
 			dm_get(hc->md);
 			return hc;
 		}
+		n = c >= 0 ? n->rb_left : n->rb_right;
+	}
 
 	return NULL;
 }
 
+static void __unlink_name(struct hash_cell *hc)
+{
+	if (hc->name_set) {
+		hc->name_set = false;
+		rb_erase(&hc->name_node, &name_rb_tree);
+	}
+}
+
+static void __unlink_uuid(struct hash_cell *hc)
+{
+	if (hc->uuid_set) {
+		hc->uuid_set = false;
+		rb_erase(&hc->uuid_node, &uuid_rb_tree);
+	}
+}
+
+static void __link_name(struct hash_cell *new_hc)
+{
+	struct rb_node **n, *parent;
+
+	__unlink_name(new_hc);
+
+	new_hc->name_set = true;
+
+	n = &name_rb_tree.rb_node;
+	parent = NULL;
+
+	while (*n) {
+		struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
+		int c = strcmp(hc->name, new_hc->name);
+		BUG_ON(!c);
+		parent = *n;
+		n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
+	}
+
+	rb_link_node(&new_hc->name_node, parent, n);
+	rb_insert_color(&new_hc->name_node, &name_rb_tree);
+}
+
+static void __link_uuid(struct hash_cell *new_hc)
+{
+	struct rb_node **n, *parent;
+
+	__unlink_uuid(new_hc);
+
+	new_hc->uuid_set = true;
+
+	n = &uuid_rb_tree.rb_node;
+	parent = NULL;
+
+	while (*n) {
+		struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
+		int c = strcmp(hc->uuid, new_hc->uuid);
+		BUG_ON(!c);
+		parent = *n;
+		n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
+	}
+
+	rb_link_node(&new_hc->uuid_node, parent, n);
+	rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree);
+}
+
 static struct hash_cell *__get_dev_cell(uint64_t dev)
 {
 	struct mapped_device *md;
@@ -185,8 +223,7 @@ static struct hash_cell *alloc_cell(const char *name, const char *uuid,
 		}
 	}
 
-	INIT_LIST_HEAD(&hc->name_list);
-	INIT_LIST_HEAD(&hc->uuid_list);
+	hc->name_set = hc->uuid_set = false;
 	hc->md = md;
 	hc->new_map = NULL;
 	return hc;
@@ -226,16 +263,16 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
 		goto bad;
 	}
 
-	list_add(&cell->name_list, _name_buckets + hash_str(name));
+	__link_name(cell);
 
 	if (uuid) {
 		hc = __get_uuid_cell(uuid);
 		if (hc) {
-			list_del(&cell->name_list);
+			__unlink_name(cell);
 			dm_put(hc->md);
 			goto bad;
 		}
-		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
+		__link_uuid(cell);
 	}
 	dm_get(md);
 	mutex_lock(&dm_hash_cells_mutex);
@@ -256,9 +293,9 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
 	struct dm_table *table;
 	int srcu_idx;
 
-	/* remove from the dev hash */
-	list_del(&hc->uuid_list);
-	list_del(&hc->name_list);
+	/* remove from the dev trees */
+	__unlink_name(hc);
+	__unlink_uuid(hc);
 	mutex_lock(&dm_hash_cells_mutex);
 	dm_set_mdptr(hc->md, NULL);
 	mutex_unlock(&dm_hash_cells_mutex);
@@ -279,7 +316,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
 
 static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
 {
-	int i, dev_skipped;
+	int dev_skipped;
+	struct rb_node *n;
 	struct hash_cell *hc;
 	struct mapped_device *md;
 	struct dm_table *t;
@@ -289,8 +327,8 @@ retry:
 
 	down_write(&_hash_lock);
 
-	for (i = 0; i < NUM_BUCKETS; i++) {
-		list_for_each_entry(hc, _name_buckets + i, name_list) {
+	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+		hc = container_of(n, struct hash_cell, name_node);
 		md = hc->md;
 		dm_get(md);
 
@@ -323,7 +361,6 @@ retry:
 		 */
 		goto retry;
 	}
-	}
 
 	up_write(&_hash_lock);
 
@@ -340,7 +377,7 @@ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
 	hc->uuid = new_uuid;
 	mutex_unlock(&dm_hash_cells_mutex);
 
-	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+	__link_uuid(hc);
 }
 
 /*
@@ -354,14 +391,14 @@ static char *__change_cell_name(struct hash_cell *hc, char *new_name)
 	/*
 	 * Rename and move the name cell.
 	 */
-	list_del(&hc->name_list);
+	__unlink_name(hc);
 	old_name = hc->name;
 
 	mutex_lock(&dm_hash_cells_mutex);
 	hc->name = new_name;
 	mutex_unlock(&dm_hash_cells_mutex);
 
-	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+	__link_name(hc);
 
 	return old_name;
 }
@@ -503,9 +540,33 @@ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
 	return ((void *) param) + param->data_start;
 }
 
+static bool filter_device(struct hash_cell *hc, const char *pfx_name, const char *pfx_uuid)
+{
+	const char *val;
+	size_t val_len, pfx_len;
+
+	val = hc->name;
+	val_len = strlen(val);
+	pfx_len = strnlen(pfx_name, DM_NAME_LEN);
+	if (pfx_len > val_len)
+		return false;
+	if (memcmp(val, pfx_name, pfx_len))
+		return false;
+
+	val = hc->uuid ? hc->uuid : "";
+	val_len = strlen(val);
+	pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
+	if (pfx_len > val_len)
+		return false;
+	if (memcmp(val, pfx_uuid, pfx_len))
+		return false;
+
+	return true;
+}
+
 static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
 {
-	unsigned int i;
+	struct rb_node *n;
 	struct hash_cell *hc;
 	size_t len, needed = 0;
 	struct gendisk *disk;
@@ -518,11 +579,14 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 	 * Loop through all the devices working out how much
 	 * space we need.
 	 */
-	for (i = 0; i < NUM_BUCKETS; i++) {
-		list_for_each_entry (hc, _name_buckets + i, name_list) {
+	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+		hc = container_of(n, struct hash_cell, name_node);
+		if (!filter_device(hc, param->name, param->uuid))
+			continue;
 		needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
-			needed += align_val(sizeof(uint32_t));
-		}
+		needed += align_val(sizeof(uint32_t) * 2);
+		if (param->flags & DM_UUID_FLAG && hc->uuid)
+			needed += align_val(strlen(hc->uuid) + 1);
 	}
 
 	/*
@@ -540,8 +604,11 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 	/*
 	 * Now loop through filling out the names.
 	 */
-	for (i = 0; i < NUM_BUCKETS; i++) {
-		list_for_each_entry (hc, _name_buckets + i, name_list) {
+	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
+		void *uuid_ptr;
+		hc = container_of(n, struct hash_cell, name_node);
+		if (!filter_device(hc, param->name, param->uuid))
+			continue;
 		if (old_nl)
 			old_nl->next = (uint32_t) ((void *) nl -
 						   (void *) old_nl);
@@ -552,10 +619,20 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 
 		old_nl = nl;
 		event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
-			*event_nr = dm_get_event_nr(hc->md);
-			nl = align_ptr(event_nr + 1);
-		}
+		event_nr[0] = dm_get_event_nr(hc->md);
+		event_nr[1] = 0;
+		uuid_ptr = align_ptr(event_nr + 2);
+		if (param->flags & DM_UUID_FLAG) {
+			if (hc->uuid) {
+				event_nr[1] |= DM_NAME_LIST_FLAG_HAS_UUID;
+				strcpy(uuid_ptr, hc->uuid);
+				uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1);
+			} else {
+				event_nr[1] |= DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID;
+			}
+		}
+		nl = uuid_ptr;
 	}
 	/*
 	 * If mismatch happens, security may be compromised due to buffer
 	 * overflow, so it's better to crash.
@@ -1991,14 +2068,9 @@ int __init dm_interface_init(void)
 {
 	int r;
 
-	r = dm_hash_init();
-	if (r)
-		return r;
-
 	r = misc_register(&_dm_misc);
 	if (r) {
 		DMERR("misc_register failed for control device");
-		dm_hash_exit();
 		return r;
 	}
 
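
The filter semantics of the new filter_device() above are plain prefix matches: an empty param->name or param->uuid matches everything (strnlen() of an empty string is 0 and a zero-length memcmp() succeeds), and a device without a UUID passes the UUID filter only when that filter is empty. A standalone check of those semantics, with the function body lifted from the patch and the DM_NAME_LEN/DM_UUID_LEN limits as defined in dm-ioctl.h:

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DM_NAME_LEN 128
#define DM_UUID_LEN 129

struct hash_cell { const char *name; const char *uuid; };

static bool filter_device(struct hash_cell *hc, const char *pfx_name,
			  const char *pfx_uuid)
{
	const char *val;
	size_t val_len, pfx_len;

	val = hc->name;
	val_len = strlen(val);
	pfx_len = strnlen(pfx_name, DM_NAME_LEN);
	if (pfx_len > val_len)
		return false;
	if (memcmp(val, pfx_name, pfx_len))
		return false;

	val = hc->uuid ? hc->uuid : "";
	val_len = strlen(val);
	pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
	if (pfx_len > val_len)
		return false;
	if (memcmp(val, pfx_uuid, pfx_len))
		return false;

	return true;
}

int main(void)
{
	struct hash_cell a = { "vg0-root", "LVM-abc123" };
	struct hash_cell b = { "crypt-home", NULL };

	printf("%d\n", filter_device(&a, "vg0", ""));  /* 1: name prefix hit */
	printf("%d\n", filter_device(&a, "", "LVM-")); /* 1: uuid prefix hit */
	printf("%d\n", filter_device(&b, "", "LVM-")); /* 0: device has no uuid */
	printf("%d\n", filter_device(&b, "", ""));     /* 1: empty filters match all */
	return 0;
}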
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
@@ -1853,6 +1853,7 @@ static int rs_check_takeover(struct raid_set *rs)
 		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
 		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
 			return 0;
+		break;
 
 	default:
 		break;
@@ -1868,6 +1869,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
 	return rs->md.new_level != rs->md.level;
 }
 
+/* True if layout is set to reshape. */
+static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
+{
+	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
+	       rs->md.new_layout != rs->md.layout ||
+	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
+}
+
 /* True if @rs is requested to reshape by ctr */
 static bool rs_reshape_requested(struct raid_set *rs)
 {
@@ -1880,9 +1889,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
 	if (rs_is_raid0(rs))
 		return false;
 
-	change = mddev->new_layout != mddev->layout ||
-		 mddev->new_chunk_sectors != mddev->chunk_sectors ||
-		 rs->delta_disks;
+	change = rs_is_layout_change(rs, false);
 
 	/* Historical case to support raid1 reshape without delta disks */
 	if (rs_is_raid1(rs)) {
@@ -2817,7 +2824,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
 }
 
 /*
- *
+ * Reshape:
  * - change raid layout
  * - change chunk size
 * - add disks
@@ -2926,6 +2933,20 @@ static int rs_setup_reshape(struct raid_set *rs)
 	return r;
 }
 
+/*
+ * If the md resync thread has updated superblock with max reshape position
+ * at the end of a reshape but not (yet) reset the layout configuration
+ * changes -> reset the latter.
+ */
+static void rs_reset_inconclusive_reshape(struct raid_set *rs)
+{
+	if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
+		rs_set_cur(rs);
+		rs->md.delta_disks = 0;
+		rs->md.reshape_backwards = 0;
+	}
+}
+
 /*
  * Enable/disable discard support on RAID set depending on
  * RAID level and discard properties of underlying RAID members.
@@ -3212,11 +3233,14 @@ size_check:
 	if (r)
 		goto bad;
 
+	/* Catch any inconclusive reshape superblock content. */
+	rs_reset_inconclusive_reshape(rs);
+
 	/* Start raid set read-only and assumed clean to change in raid_resume() */
 	rs->md.ro = 1;
 	rs->md.in_sync = 1;
 
-	/* Keep array frozen */
+	/* Keep array frozen until resume. */
 	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 
 	/* Has to be held on running the array */
@@ -3230,7 +3254,6 @@ size_check:
 	}
 
 	r = md_start(&rs->md);
-
 	if (r) {
 		ti->error = "Failed to start raid array";
 		mddev_unlock(&rs->md);
@@ -3727,15 +3750,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 	blk_limits_io_min(limits, chunk_size_bytes);
 	blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
-	/*
-	 * RAID0 and RAID10 personalities require bio splitting,
-	 * RAID1/4/5/6 don't and process large discard bios properly.
-	 */
-	if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
-		limits->discard_granularity = chunk_size_bytes;
-		limits->max_discard_sectors = rs->md.chunk_sectors;
-	}
 }
 
 static void raid_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
@@ -569,6 +569,7 @@ out_tag_set:
 	blk_mq_free_tag_set(md->tag_set);
 out_kfree_tag_set:
 	kfree(md->tag_set);
+	md->tag_set = NULL;
 
 	return err;
 }
@@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
 	if (md->tag_set) {
 		blk_mq_free_tag_set(md->tag_set);
 		kfree(md->tag_set);
+		md->tag_set = NULL;
 	}
 }
 
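
The dm-rq fix above is the classic free-and-NULL idiom: the init function's error path freed md->tag_set but left the stale pointer in place, so when a device remove followed a failed table load, dm_mq_cleanup_mapped_device() saw a non-NULL pointer and freed it a second time. Clearing the pointer at both sites makes cleanup idempotent. A condensed userspace illustration (structure and names simplified from the dm-rq code):

#include <stdio.h>
#include <stdlib.h>

struct mapped_device { int *tag_set; };

/* Error path of an init function: release and clear the pointer. */
static int init_fails(struct mapped_device *md)
{
	md->tag_set = malloc(sizeof(*md->tag_set));
	/* ... suppose a later setup step fails ... */
	free(md->tag_set);
	md->tag_set = NULL;	/* the one-line fix */
	return -1;
}

/* Cleanup called later on device remove: now safe to run regardless. */
static void cleanup(struct mapped_device *md)
{
	if (md->tag_set) {	/* skipped, since the pointer was cleared */
		free(md->tag_set);
		md->tag_set = NULL;
	}
}

int main(void)
{
	struct mapped_device md = { 0 };

	if (init_fails(&md))
		fprintf(stderr, "table load failed\n");
	cleanup(&md);		/* no double free */
	return 0;
}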
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
@@ -596,7 +596,7 @@ static void persistent_dtr(struct dm_exception_store *store)
 	free_area(ps);
 
 	/* Allocated in persistent_read_metadata */
-	vfree(ps->callbacks);
+	kvfree(ps->callbacks);
 
 	kfree(ps);
 }
@@ -621,8 +621,8 @@ static int persistent_read_metadata(struct dm_exception_store *store,
 	 */
 	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
 				  sizeof(struct disk_exception);
-	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
-				   sizeof(*ps->callbacks));
+	ps->callbacks = kvcalloc(ps->exceptions_per_area,
+				 sizeof(*ps->callbacks), GFP_KERNEL);
 	if (!ps->callbacks)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
@@ -663,7 +663,8 @@ static int dm_exception_table_init(struct dm_exception_table *et,
 
 	et->hash_shift = hash_shift;
 	et->hash_mask = size - 1;
-	et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
+	et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
+				   GFP_KERNEL);
 	if (!et->table)
 		return -ENOMEM;
 
@@ -689,7 +690,7 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
 			kmem_cache_free(mem, ex);
 	}
 
-	vfree(et->table);
+	kvfree(et->table);
 }
 
 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
@@ -94,24 +94,6 @@ static int setup_btree_index(unsigned int l, struct dm_table *t)
 	return 0;
 }
 
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
-{
-	unsigned long size;
-	void *addr;
-
-	/*
-	 * Check that we're not going to overflow.
-	 */
-	if (nmemb > (ULONG_MAX / elem_size))
-		return NULL;
-
-	size = nmemb * elem_size;
-	addr = vzalloc(size);
-
-	return addr;
-}
-EXPORT_SYMBOL(dm_vcalloc);
-
 /*
  * highs, and targets are managed as dynamic arrays during a
  * table load.
@@ -124,15 +106,15 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	/*
 	 * Allocate both the target array and offset array at once.
 	 */
-	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
-					  sizeof(sector_t));
+	n_highs = kvcalloc(num, sizeof(struct dm_target) + sizeof(sector_t),
+			   GFP_KERNEL);
 	if (!n_highs)
 		return -ENOMEM;
 
 	n_targets = (struct dm_target *) (n_highs + num);
 
 	memset(n_highs, -1, sizeof(*n_highs) * num);
-	vfree(t->highs);
+	kvfree(t->highs);
 
 	t->num_allocated = num;
 	t->highs = n_highs;
@@ -198,7 +180,7 @@ void dm_table_destroy(struct dm_table *t)
 
 	/* free the indexes */
 	if (t->depth >= 2)
-		vfree(t->index[t->depth - 2]);
+		kvfree(t->index[t->depth - 2]);
 
 	/* free the targets */
 	for (i = 0; i < t->num_targets; i++) {
@@ -210,7 +192,7 @@ void dm_table_destroy(struct dm_table *t)
 		dm_put_target_type(tgt->type);
 	}
 
-	vfree(t->highs);
+	kvfree(t->highs);
 
 	/* free the device list */
 	free_devices(&t->devices, t->md);
@@ -1077,7 +1059,7 @@ static int setup_indexes(struct dm_table *t)
 		total += t->counts[i];
 	}
 
-	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
+	indexes = kvcalloc(total, NODE_SIZE, GFP_KERNEL);
 	if (!indexes)
 		return -ENOMEM;
 
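
Replacing dm_vcalloc() works because kvcalloc() already provides the two things the wrapper added over plain vmalloc(): zeroing and an nmemb * elem_size overflow check (and it first tries kmalloc before falling back to vmalloc, which is why the matching frees become kvfree()). For reference, the overflow guard being preserved looks like this in portable C:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* calloc-style allocator with an explicit multiplication overflow check,
 * equivalent to the guard the removed dm_vcalloc() carried. */
static void *checked_calloc(unsigned long nmemb, unsigned long elem_size)
{
	if (elem_size && nmemb > ULONG_MAX / elem_size)
		return NULL;	/* nmemb * elem_size would wrap around */
	return calloc(nmemb, elem_size); /* calloc also zeroes, like vzalloc */
}

int main(void)
{
	void *p = checked_calloc(ULONG_MAX / 2, 16);

	printf("oversized request: %s\n", p ? "allocated?!" : "rejected");
	free(p);
	return 0;
}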
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
@@ -2816,7 +2816,7 @@ static bool data_dev_supports_discard(struct pool_c *pt)
 {
 	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
 
-	return q && blk_queue_discard(q);
+	return blk_queue_discard(q);
 }
 
 static bool is_factor(sector_t block_size, uint32_t n)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
@@ -893,6 +893,28 @@ out:
 	return r;
 }
 
+static inline bool verity_is_verity_mode(const char *arg_name)
+{
+	return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
+		!strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
+		!strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
+}
+
+static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
+{
+	if (v->mode)
+		return -EINVAL;
+
+	if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
+		v->mode = DM_VERITY_MODE_LOGGING;
+	else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
+		v->mode = DM_VERITY_MODE_RESTART;
+	else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
+		v->mode = DM_VERITY_MODE_PANIC;
+
+	return 0;
+}
+
 static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
 				 struct dm_verity_sig_opts *verify_args)
 {
@@ -916,16 +938,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
 		arg_name = dm_shift_arg(as);
 		argc--;
 
-		if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING)) {
-			v->mode = DM_VERITY_MODE_LOGGING;
-			continue;
-
-		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART)) {
-			v->mode = DM_VERITY_MODE_RESTART;
-			continue;
-
-		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC)) {
-			v->mode = DM_VERITY_MODE_PANIC;
+		if (verity_is_verity_mode(arg_name)) {
+			r = verity_parse_verity_mode(v, arg_name);
+			if (r) {
+				ti->error = "Conflicting error handling parameters";
+				return r;
+			}
 			continue;
 
 		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
@@ -1242,7 +1260,7 @@ bad:
 
 static struct target_type verity_target = {
 	.name		= "verity",
-	.version	= {1, 7, 0},
+	.version	= {1, 8, 0},
 	.module		= THIS_MODULE,
 	.ctr		= verity_ctr,
 	.dtr		= verity_dtr,
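
Folding the three verity mode options into verity_parse_verity_mode() also tightens behavior: previously a table line could pass, say, restart_on_corruption and panic_on_corruption together and whichever was parsed last silently won; now a second mode option fails the constructor because v->mode is already set. A compact userspace rendering of that guard (option strings as used by dm-verity):

#include <stdio.h>
#include <strings.h>

enum { MODE_UNSET, MODE_LOGGING, MODE_RESTART, MODE_PANIC };

static int parse_mode(int *mode, const char *arg)
{
	if (*mode)
		return -1;	/* a mode was already chosen: conflict */
	if (!strcasecmp(arg, "ignore_corruption"))
		*mode = MODE_LOGGING;
	else if (!strcasecmp(arg, "restart_on_corruption"))
		*mode = MODE_RESTART;
	else if (!strcasecmp(arg, "panic_on_corruption"))
		*mode = MODE_PANIC;
	return 0;
}

int main(void)
{
	int mode = MODE_UNSET;

	parse_mode(&mode, "restart_on_corruption");
	if (parse_mode(&mode, "panic_on_corruption"))
		fprintf(stderr, "Conflicting error handling parameters\n");
	return 0;
}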
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
@@ -73,7 +73,7 @@ struct wc_memory_superblock {
 		};
 		__le64 padding[8];
 	};
-	struct wc_memory_entry entries[0];
+	struct wc_memory_entry entries[];
 };
 
 struct wc_entry {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -840,7 +840,6 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
 	*result = &td->dm_dev;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dm_get_table_device);
 
 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
 {
@@ -854,7 +853,6 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
 	}
 	mutex_unlock(&md->table_devices_lock);
 }
-EXPORT_SYMBOL(dm_put_table_device);
 
 static void free_table_devices(struct list_head *devices)
 {
@@ -1641,7 +1639,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 	} else {
 		ci.bio = bio;
 		ci.sector_count = bio_sectors(bio);
-		while (ci.sector_count && !error) {
 		error = __split_and_process_non_flush(&ci);
 		if (ci.sector_count && !error) {
 			/*
@@ -1671,8 +1668,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			bio_chain(b, bio);
 			trace_block_split(b, bio->bi_iter.bi_sector);
 			ret = submit_bio_noacct(bio);
-				break;
-			}
 		}
 	}
 
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
@@ -34,12 +34,12 @@ struct node_header {
 	__le32 max_entries;
 	__le32 value_size;
 	__le32 padding;
-} __packed;
+} __attribute__((packed, aligned(8)));
 
 struct btree_node {
 	struct node_header header;
 	__le64 keys[];
-} __packed;
+} __attribute__((packed, aligned(8)));
 
 
 /*
@@ -83,7 +83,7 @@ struct shadow_spine {
 };
 
 void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
-int exit_shadow_spine(struct shadow_spine *s);
+void exit_shadow_spine(struct shadow_spine *s);
 
 int shadow_step(struct shadow_spine *s, dm_block_t b,
 		struct dm_btree_value_type *vt);
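
The attribute change above matters because __packed alone lowers a struct's alignment to 1, so on architectures without cheap unaligned accesses the compiler must assume any instance may sit at an odd address and emit byte-by-byte loads for the __le64 fields. These on-disk structures are in fact always placed at 8-byte-aligned offsets, and aligned(8) states that guarantee without changing the layout. The difference is visible in a few lines of standalone C (field names loosely modeled on the structs above):

#include <stdint.h>
#include <stdio.h>

struct hdr_packed {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed));

struct hdr_packed_aligned {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
} __attribute__((packed, aligned(8)));

int main(void)
{
	/* packed alone: alignment 1, so 64-bit members may be accessed
	 * with byte loads on strict-alignment architectures */
	printf("packed:         size %zu, align %zu\n",
	       sizeof(struct hdr_packed), _Alignof(struct hdr_packed));
	/* packed + aligned(8): same 16-byte layout, but 8-byte alignment
	 * is now a contract the compiler can rely on */
	printf("packed+aligned: size %zu, align %zu\n",
	       sizeof(struct hdr_packed_aligned),
	       _Alignof(struct hdr_packed_aligned));
	return 0;
}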
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
@@ -30,8 +30,6 @@ static void node_prepare_for_write(struct dm_block_validator *v,
 	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
 					     block_size - sizeof(__le32),
 					     BTREE_CSUM_XOR));
-
-	BUG_ON(node_check(v, b, 4096));
 }
 
 static int node_check(struct dm_block_validator *v,
@@ -183,15 +181,13 @@ void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
 	s->count = 0;
 }
 
-int exit_shadow_spine(struct shadow_spine *s)
+void exit_shadow_spine(struct shadow_spine *s)
 {
-	int r = 0, i;
+	int i;
 
 	for (i = 0; i < s->count; i++) {
 		unlock_block(s->info, s->nodes[i]);
 	}
-
-	return r;
 }
 
 int shadow_step(struct shadow_spine *s, dm_block_t b,
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
@@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 	 */
 	begin = do_div(index_begin, ll->entries_per_block);
 	end = do_div(end, ll->entries_per_block);
+	if (end == 0)
+		end = ll->entries_per_block;
 
 	for (i = index_begin; i < index_end; i++, begin = 0) {
 		struct dm_block *blk;
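
The sm_ll_find_free_block() change above fixes a remainder edge case: do_div() divides in place and returns the remainder, and that remainder is used as the exclusive search bound within the final index block. When the original end was an exact multiple of ll->entries_per_block the remainder comes back 0, so the last block was searched for zero entries rather than all of them. A standalone rendering of the arithmetic (do_div_model() imitates the kernel's do_div() semantics):

#include <stdint.h>
#include <stdio.h>

/* Model of do_div(): divides in place, returns the remainder. */
static uint32_t do_div_model(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	const uint32_t entries_per_block = 1024;
	uint64_t ends[] = { 1500, 2048 }; /* 2048 is an exact multiple */

	for (int i = 0; i < 2; i++) {
		uint64_t idx = ends[i];
		uint32_t end = do_div_model(&idx, entries_per_block);

		/* the fix: a zero remainder means "whole last block" */
		if (end == 0)
			end = entries_per_block;
		printf("end %llu -> search %u entries of the last index block\n",
		       (unsigned long long)ends[i], end);
	}
	return 0;
}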
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
@@ -33,7 +33,7 @@ struct disk_index_entry {
 	__le64 blocknr;
 	__le32 nr_free;
 	__le32 none_free_before;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
 
 
 #define MAX_METADATA_BITMAPS 255
@@ -43,7 +43,7 @@ struct disk_metadata_index {
 	__le64 blocknr;
 
 	struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __packed;
+} __attribute__ ((packed, aligned(8)));
 
 struct ll_disk;
 
@@ -86,7 +86,7 @@ struct disk_sm_root {
 	__le64 nr_allocated;
 	__le64 bitmap_root;
 	__le64 ref_count_root;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
 
 #define ENTRIES_PER_BYTE 4
 
@@ -94,7 +94,7 @@ struct disk_bitmap_header {
 	__le32 csum;
 	__le32 not_used;
 	__le64 blocknr;
-} __packed;
+} __attribute__ ((packed, aligned(8)));
 
 enum allocation_event {
 	SM_NONE,
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -187,13 +187,8 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
 static int sm_disk_commit(struct dm_space_map *sm)
 {
 	int r;
-	dm_block_t nr_free;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	r = sm_disk_get_nr_free(sm, &nr_free);
-	if (r)
-		return r;
-
 	r = sm_ll_commit(&smd->ll);
 	if (r)
 		return r;
@@ -202,10 +197,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
 	smd->begin = 0;
 	smd->nr_allocated_this_transaction = 0;
 
-	r = sm_disk_get_nr_free(sm, &nr_free);
-	if (r)
-		return r;
-
 	return 0;
 }
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
@@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
  */
 void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
 
-/*
- * A wrapper around vmalloc.
- */
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
-
 /*-----------------------------------------------------------------
  * Macros.
  *---------------------------------------------------------------*/
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
@@ -193,8 +193,22 @@ struct dm_name_list {
 	__u32 next;		/* offset to the next record from
 				   the _start_ of this */
 	char name[0];
+
+	/*
+	 * The following members can be accessed by taking a pointer that
+	 * points immediately after the terminating zero character in "name"
+	 * and aligning this pointer to next 8-byte boundary.
+	 * Uuid is present if the flag DM_NAME_LIST_FLAG_HAS_UUID is set.
+	 *
+	 * __u32 event_nr;
+	 * __u32 flags;
+	 * char uuid[0];
+	 */
 };
 
+#define DM_NAME_LIST_FLAG_HAS_UUID		1
+#define DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID	2
+
 /*
  * Used to retrieve the target versions
  */
@@ -272,9 +286,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	44
+#define DM_VERSION_MINOR	45
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2021-02-01)"
+#define DM_VERSION_EXTRA	"-ioctl (2021-03-22)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
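
Put together, the uapi additions above define how a caller retrieves UUIDs and filters the listing: pre-load a name or UUID prefix into the request header, set DM_UUID_FLAG, and walk the dm_name_list records whose 8-byte-aligned trailer now carries event_nr, a flags word, and optionally the uuid. A hedged userspace sketch (error handling trimmed; the "vg0-" prefix is just an example value; assumes dm-ioctl 4.45, i.e. a kernel containing this merge, and privileges to open /dev/mapper/control):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

#define BUF_SIZE 16384

static inline void *align8(void *p)
{
	return (void *)(((unsigned long)p + 7) & ~7UL);
}

int main(void)
{
	char buf[BUF_SIZE] = { 0 };
	struct dm_ioctl *dmi = (struct dm_ioctl *)buf;
	struct dm_name_list *nl;
	int fd = open("/dev/mapper/control", O_RDWR);

	dmi->version[0] = DM_VERSION_MAJOR;
	dmi->version[1] = DM_VERSION_MINOR;
	dmi->version[2] = DM_VERSION_PATCHLEVEL;
	dmi->data_size = BUF_SIZE;
	dmi->data_start = sizeof(*dmi);
	dmi->flags = DM_UUID_FLAG;	/* ask for uuids in the reply */
	strcpy(dmi->name, "vg0-");	/* kernel-side name prefix filter */

	if (fd < 0 || ioctl(fd, DM_LIST_DEVICES, dmi) < 0) {
		perror("DM_LIST_DEVICES");
		return 1;
	}

	nl = (struct dm_name_list *)(buf + dmi->data_start);
	if (nl->dev) {			/* dev == 0 flags an empty list */
		for (;;) {
			/* trailer documented in dm-ioctl.h: event_nr, flags, uuid */
			__u32 *ev = align8(nl->name + strlen(nl->name) + 1);

			printf("%s", nl->name);
			if (ev[1] & DM_NAME_LIST_FLAG_HAS_UUID)
				printf(" uuid=%s", (char *)(ev + 2));
			printf("\n");
			if (!nl->next)
				break;
			nl = (struct dm_name_list *)((char *)nl + nl->next);
		}
	}
	close(fd);
	return 0;
}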