mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-10-31 16:54:21 +00:00)

	cpumask: IA64: Introduce cpumask_of_{node,pcibus} to replace {node,pcibus}_to_cpumask
Impact: New APIs

The old node_to_cpumask/node_to_pcibus returned a cpumask_t: these return
a pointer to a struct cpumask. Part of removing cpumasks from the stack.

We can also use the new for_each_cpu_and() to avoid a temporary cpumask,
and a gratuitous test in sn_topology_show.

(Includes fix from KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
commit fbb776c3ca
parent 86c6f274f5

4 changed files with 30 additions and 29 deletions
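For readers outside the kernel tree, the sketch below models the API shift the commit message describes. It is a standalone userspace analogue, not kernel code: the toy_* names, the 8-CPU masks and the sample online set are invented for illustration. The old style returns the node mask by value, so a copy lands on the caller's stack; the new style hands back a pointer to a constant mask, and iterating the AND of two masks (as for_each_cpu_and() does) removes the need for a temporary mask.

/*
 * Standalone userspace model of the cpumask change (illustrative only:
 * the toy_* names and 8-CPU masks are not part of any kernel API).
 */
#include <stdio.h>

#define TOY_NR_CPUS 8

struct toy_cpumask { unsigned long bits; };

static const struct toy_cpumask toy_node_masks[2] = {
	{ 0x0f },		/* node 0: CPUs 0-3 */
	{ 0xf0 },		/* node 1: CPUs 4-7 */
};

/* Old style: return the mask by value -> a copy lands on the caller's stack. */
static struct toy_cpumask toy_node_to_cpumask(int node)
{
	return toy_node_masks[node];
}

/* New style: return a pointer to the constant mask -> no copy at all. */
static const struct toy_cpumask *toy_cpumask_of_node(int node)
{
	return &toy_node_masks[node];
}

/* Model of for_each_cpu_and(): visit CPUs set in both masks, no temporary. */
#define toy_for_each_cpu_and(cpu, m1, m2)				\
	for ((cpu) = 0; (cpu) < TOY_NR_CPUS; (cpu)++)			\
		if ((m1)->bits & (m2)->bits & (1UL << (cpu)))

int main(void)
{
	const struct toy_cpumask online = { 0x57 };	/* CPUs 0,1,2,4,6 "online" */
	struct toy_cpumask copy = toy_node_to_cpumask(1);	/* old: by-value copy */
	int cpu;

	printf("old-style stack copy of node 1: 0x%lx\n", copy.bits);

	/* New style: iterate the AND of node 1 and the online mask directly. */
	toy_for_each_cpu_and(cpu, toy_cpumask_of_node(1), &online)
		printf("node 1 online cpu %d\n", cpu);

	return 0;
}

With NR_CPUS configured in the thousands, a cpumask_t is hundreds of bytes, which is the stack cost the pointer-returning helpers and for_each_cpu_and() avoid.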
				
			
@@ -34,6 +34,7 @@
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define node_to_cpumask(node) (node_to_cpu_mask[node])
+#define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
@@ -45,7 +46,7 @@
 /*
  * Returns the number of the first CPU on Node 'node'.
  */
-#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
 
 /*
  * Determines the node for a given pci bus
@@ -121,6 +122,10 @@ extern void arch_fix_phys_package_id(int num, u32 slot);
 					node_to_cpumask(pcibus_to_node(bus)) \
 				)
 
+#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
+				 cpu_all_mask :				\
+				 cpumask_from_node(pcibus_to_node(bus)))
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_IA64_TOPOLOGY_H */

@@ -1001,7 +1001,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node) ||
-	    cpus_empty(node_to_cpumask(node)))
+	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
 	/* We know a gsi to node mapping! */

@@ -695,32 +695,31 @@ get_target_cpu (unsigned int gsi, int irq)
 #ifdef CONFIG_NUMA
 	{
 		int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
-		cpumask_t cpu_mask;
+		const struct cpumask *cpu_mask;
 
 		iosapic_index = find_iosapic(gsi);
 		if (iosapic_index < 0 ||
 		    iosapic_lists[iosapic_index].node == MAX_NUMNODES)
 			goto skip_numa_setup;
 
-		cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
-		cpus_and(cpu_mask, cpu_mask, domain);
-		for_each_cpu_mask(numa_cpu, cpu_mask) {
-			if (!cpu_online(numa_cpu))
-				cpu_clear(numa_cpu, cpu_mask);
+		cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node);
+		num_cpus = 0;
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain) {
+			if (cpu_online(numa_cpu))
+				num_cpus++;
 		}
 
-		num_cpus = cpus_weight(cpu_mask);
-
 		if (!num_cpus)
 			goto skip_numa_setup;
 
 		/* Use irq assignment to distribute across cpus in node */
 		cpu_index = irq % num_cpus;
 
-		for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
-			numa_cpu = next_cpu(numa_cpu, cpu_mask);
+		for_each_cpu_and(numa_cpu, cpu_mask, &domain)
+			if (cpu_online(numa_cpu) && i++ >= cpu_index)
+				break;
 
-		if (numa_cpu != NR_CPUS)
+		if (numa_cpu < nr_cpu_ids)
 			return cpu_physical_id(numa_cpu);
 	}
 skip_numa_setup:
@@ -731,7 +730,7 @@ skip_numa_setup:
 	 * case of NUMA.)
 	 */
 	do {
-		if (++cpu >= NR_CPUS)
+		if (++cpu >= nr_cpu_ids)
 			cpu = 0;
 	} while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
 

@@ -385,7 +385,6 @@ static int sn_topology_show(struct seq_file *s, void *d)
 	int j;
 	const char *slabname;
 	int ordinal;
-	cpumask_t cpumask;
 	char slice;
 	struct cpuinfo_ia64 *c;
 	struct sn_hwperf_port_info *ptdata;
@@ -473,23 +472,21 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		 * CPUs on this node, if any
 		 */
 		if (!SN_HWPERF_IS_IONODE(obj)) {
-			cpumask = node_to_cpumask(ordinal);
-			for_each_online_cpu(i) {
-				if (cpu_isset(i, cpumask)) {
-					slice = 'a' + cpuid_to_slice(i);
-					c = cpu_data(i);
-					seq_printf(s, "cpu %d %s%c local"
-						" freq %luMHz, arch ia64",
-						i, obj->location, slice,
-						c->proc_freq / 1000000);
-					for_each_online_cpu(j) {
-						seq_printf(s, j ? ":%d" : ", dist %d",
-							node_distance(
+			for_each_cpu_and(i, cpu_online_mask,
+					 cpumask_of_node(ordinal)) {
+				slice = 'a' + cpuid_to_slice(i);
+				c = cpu_data(i);
+				seq_printf(s, "cpu %d %s%c local"
+					   " freq %luMHz, arch ia64",
+					   i, obj->location, slice,
+					   c->proc_freq / 1000000);
+				for_each_online_cpu(j) {
+					seq_printf(s, j ? ":%d" : ", dist %d",
+						   node_distance(
 							cpu_to_node(i),
 							cpu_to_node(j)));
-					}
-					seq_putc(s, '\n');
 				}
+				seq_putc(s, '\n');
 			}
 		}
 	}