mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-10-31 16:54:21 +00:00 
			
		
		
		
	ocfs2/dlm: use bitmap API instead of hand-writing it
Use bitmap_zero/bitmap_copy/bitmap_equal directly for bitmap operations. Link: https://lkml.kernel.org/r/20221007124846.186453-3-joseph.qi@linux.alibaba.com Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Gang He <ghe@suse.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									6d4a93b680
								
							
						
					
					
						commit
						b270f492dc
					
				
					 4 changed files with 24 additions and 27 deletions
				
			
		|  | @ -1094,7 +1094,7 @@ static inline enum dlm_status dlm_err_to_dlm_status(int err) | |||
| static inline void dlm_node_iter_init(unsigned long *map, | ||||
| 				      struct dlm_node_iter *iter) | ||||
| { | ||||
| 	memcpy(iter->node_map, map, sizeof(iter->node_map)); | ||||
| 	bitmap_copy(iter->node_map, map, O2NM_MAX_NODES); | ||||
| 	iter->curnode = -1; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -1576,8 +1576,8 @@ static int dlm_should_restart_join(struct dlm_ctxt *dlm, | |||
| 	spin_lock(&dlm->spinlock); | ||||
| 	/* For now, we restart the process if the node maps have
 | ||||
| 	 * changed at all */ | ||||
| 	ret = memcmp(ctxt->live_map, dlm->live_nodes_map, | ||||
| 		     sizeof(dlm->live_nodes_map)); | ||||
| 	ret = !bitmap_equal(ctxt->live_map, dlm->live_nodes_map, | ||||
| 			    O2NM_MAX_NODES); | ||||
| 	spin_unlock(&dlm->spinlock); | ||||
| 
 | ||||
| 	if (ret) | ||||
|  | @ -1607,10 +1607,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
| 	o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES); | ||||
| 
 | ||||
| 	spin_lock(&dlm->spinlock); | ||||
| 	memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); | ||||
| 
 | ||||
| 	bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES); | ||||
| 	__dlm_set_joining_node(dlm, dlm->node_num); | ||||
| 
 | ||||
| 	spin_unlock(&dlm->spinlock); | ||||
| 
 | ||||
| 	node = -1; | ||||
|  | @ -1643,8 +1641,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
| 	 * yes_resp_map. Copy that into our domain map and send a join | ||||
| 	 * assert message to clean up everyone elses state. */ | ||||
| 	spin_lock(&dlm->spinlock); | ||||
| 	memcpy(dlm->domain_map, ctxt->yes_resp_map, | ||||
| 	       sizeof(ctxt->yes_resp_map)); | ||||
| 	bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES); | ||||
| 	set_bit(dlm->node_num, dlm->domain_map); | ||||
| 	spin_unlock(&dlm->spinlock); | ||||
| 
 | ||||
|  | @ -2009,9 +2006,9 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | |||
| 	mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n", | ||||
| 		  dlm->recovery_map, &(dlm->recovery_map[0])); | ||||
| 
 | ||||
| 	memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map)); | ||||
| 	memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map)); | ||||
| 	memset(dlm->domain_map, 0, sizeof(dlm->domain_map)); | ||||
| 	bitmap_zero(dlm->recovery_map, O2NM_MAX_NODES); | ||||
| 	bitmap_zero(dlm->live_nodes_map, O2NM_MAX_NODES); | ||||
| 	bitmap_zero(dlm->domain_map, O2NM_MAX_NODES); | ||||
| 
 | ||||
| 	dlm->dlm_thread_task = NULL; | ||||
| 	dlm->dlm_reco_thread_task = NULL; | ||||
|  |  | |||
|  | @ -258,12 +258,12 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle, | |||
| 	mle->type = type; | ||||
| 	INIT_HLIST_NODE(&mle->master_hash_node); | ||||
| 	INIT_LIST_HEAD(&mle->hb_events); | ||||
| 	memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | ||||
| 	bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); | ||||
| 	spin_lock_init(&mle->spinlock); | ||||
| 	init_waitqueue_head(&mle->wq); | ||||
| 	atomic_set(&mle->woken, 0); | ||||
| 	kref_init(&mle->mle_refs); | ||||
| 	memset(mle->response_map, 0, sizeof(mle->response_map)); | ||||
| 	bitmap_zero(mle->response_map, O2NM_MAX_NODES); | ||||
| 	mle->master = O2NM_MAX_NODES; | ||||
| 	mle->new_master = O2NM_MAX_NODES; | ||||
| 	mle->inuse = 0; | ||||
|  | @ -290,8 +290,8 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle, | |||
| 	atomic_inc(&dlm->mle_cur_count[mle->type]); | ||||
| 
 | ||||
| 	/* copy off the node_map and register hb callbacks on our copy */ | ||||
| 	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); | ||||
| 	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); | ||||
| 	bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES); | ||||
| 	bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES); | ||||
| 	clear_bit(dlm->node_num, mle->vote_map); | ||||
| 	clear_bit(dlm->node_num, mle->node_map); | ||||
| 
 | ||||
|  | @ -572,7 +572,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm, | |||
| 	spin_unlock(&dlm->track_lock); | ||||
| 
 | ||||
| 	memset(res->lvb, 0, DLM_LVB_LEN); | ||||
| 	memset(res->refmap, 0, sizeof(res->refmap)); | ||||
| 	bitmap_zero(res->refmap, O2NM_MAX_NODES); | ||||
| } | ||||
| 
 | ||||
| struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | ||||
|  | @ -1036,10 +1036,10 @@ recheck: | |||
| 
 | ||||
| 	spin_lock(&mle->spinlock); | ||||
| 	m = mle->master; | ||||
| 	map_changed = (memcmp(mle->vote_map, mle->node_map, | ||||
| 			      sizeof(mle->vote_map)) != 0); | ||||
| 	voting_done = (memcmp(mle->vote_map, mle->response_map, | ||||
| 			     sizeof(mle->vote_map)) == 0); | ||||
| 	map_changed = !bitmap_equal(mle->vote_map, mle->node_map, | ||||
| 				    O2NM_MAX_NODES); | ||||
| 	voting_done = bitmap_equal(mle->vote_map, mle->response_map, | ||||
| 				   O2NM_MAX_NODES); | ||||
| 
 | ||||
| 	/* restart if we hit any errors */ | ||||
| 	if (map_changed) { | ||||
|  | @ -1277,11 +1277,11 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | |||
| 
 | ||||
| 			/* now blank out everything, as if we had never
 | ||||
| 			 * contacted anyone */ | ||||
| 			memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | ||||
| 			memset(mle->response_map, 0, sizeof(mle->response_map)); | ||||
| 			bitmap_zero(mle->maybe_map, O2NM_MAX_NODES); | ||||
| 			bitmap_zero(mle->response_map, O2NM_MAX_NODES); | ||||
| 			/* reset the vote_map to the current node_map */ | ||||
| 			memcpy(mle->vote_map, mle->node_map, | ||||
| 			       sizeof(mle->node_map)); | ||||
| 			bitmap_copy(mle->vote_map, mle->node_map, | ||||
| 				    O2NM_MAX_NODES); | ||||
| 			/* put myself into the maybe map */ | ||||
| 			if (mle->type != DLM_MLE_BLOCK) | ||||
| 				set_bit(dlm->node_num, mle->maybe_map); | ||||
|  | @ -2094,7 +2094,7 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) | |||
| 	flags = item->u.am.flags; | ||||
| 
 | ||||
| 	spin_lock(&dlm->spinlock); | ||||
| 	memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); | ||||
| 	bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES); | ||||
| 	spin_unlock(&dlm->spinlock); | ||||
| 
 | ||||
| 	clear_bit(dlm->node_num, nodemap); | ||||
|  | @ -3447,7 +3447,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
| 		ret = 0; | ||||
| 	} | ||||
| 
 | ||||
| 	memset(iter.node_map, 0, sizeof(iter.node_map)); | ||||
| 	bitmap_zero(iter.node_map, O2NM_MAX_NODES); | ||||
| 	set_bit(old_master, iter.node_map); | ||||
| 	mlog(0, "doing assert master of %.*s back to %u\n", | ||||
| 	     res->lockname.len, res->lockname.name, old_master); | ||||
|  |  | |||
|  | @ -733,7 +733,7 @@ static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | |||
| 	struct dlm_reco_node_data *ndata; | ||||
| 
 | ||||
| 	spin_lock(&dlm->spinlock); | ||||
| 	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); | ||||
| 	bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES); | ||||
| 	/* nodes can only be removed (by dying) after dropping
 | ||||
| 	 * this lock, and death will be trapped later, so this should do */ | ||||
| 	spin_unlock(&dlm->spinlock); | ||||
|  |  | |||
		Loading…
	
	Add table
		
		Reference in a new issue
	
	 Joseph Qi
						Joseph Qi