mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-11-01 09:13:37 +00:00

Merge branch 'memdup_user_nul' into work.misc

commit 7e935c7ca1
596 changed files with 5024 additions and 2946 deletions

@@ -22,8 +22,7 @@ Required properties:
 Optional properties:
 - ti,hwmods:	Name of the hwmods associated to the eDMA CC
 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
-		these channels will be SW triggered channels. The list must
-		contain 16 bits numbers, see example.
+		these channels will be SW triggered channels. See example.
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
 		the driver, they are allocated to be used by for example the
 		DSP. See example.

@@ -56,10 +55,9 @@ edma: edma@49000000 {
 	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
 
 	/* Channel 20 and 21 is allocated for memcpy */
-	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
-	/* The following PaRAM slots are reserved: 35-45 and 100-110 */
-	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
-				       /bits/ 16 <100 10>;
+	ti,edma-memcpy-channels = <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
 };
 
 edma_tptc0: tptc@49800000 {

@@ -11,6 +11,10 @@ Required properties:
       0 = active high
       1 = active low
 
+Optional properties:
+- little-endian : GPIO registers are used as little endian. If not
+                  present registers are used as big endian by default.
+
 Example:
 
 gpio0: gpio@1100 {
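
The example node in this hunk is truncated to its opening line. As a hedged illustration only, not text from the collapsed file, a complete node using the new optional property might look like this; the compatible string, register window and cell counts are assumptions made for the sketch:

gpio0: gpio@1100 {
	compatible = "vendor,example-gpio";	/* hypothetical compatible string */
	reg = <0x1100 0x80>;			/* illustrative register window */
	gpio-controller;
	#gpio-cells = <2>;
	little-endian;				/* new optional property: registers accessed as little endian */
};
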
@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
 Required subnode-properties:
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.
-	- channel: Channel this key is attached to, mut be 0 or 1.
+	- channel: Channel this key is attached to, must be 0 or 1.
 	- voltage: Voltage in µV at lradc input when this key is pressed.
 
 Example:
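
The example section of this binding is collapsed in the view above. A hedged sketch of one key sub-node, built only from the listed subnode-properties (the node name, keycode, channel and voltage values are made up for illustration):

button_up {
	label = "Volume Up";
	linux,code = <115>;	/* illustrative keycode */
	channel = <0>;
	voltage = <200000>;	/* illustrative threshold in µV */
};
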
@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
 as RedBoot.
 
 The partition table should be a subnode of the mtd node and should be named
-'partitions'. Partitions are defined in subnodes of the partitions node.
+'partitions'. This node should have the following property:
+- compatible : (required) must be "fixed-partitions"
+Partitions are then defined in subnodes of the partitions node.
 
 For backwards compatibility partitions as direct subnodes of the mtd device are
 supported. This use is discouraged.

@@ -36,6 +38,7 @@ Examples:
 
 flash@0 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <1>;
 

@@ -53,6 +56,7 @@ flash@0 {
 
 flash@1 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <2>;
 

@@ -66,6 +70,7 @@ flash@1 {
 
 flash@2 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <2>;
 		#size-cells = <2>;
 

@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related to the
 issue to e1000-devel@lists.sourceforge.net.
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not install
-or use the Software.
-
-* Other names and brands may be claimed as the property of others.

MAINTAINERS (20 changed lines)

@@ -2975,6 +2975,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
+M:	Vladimir Davydov <vdavydov@virtuozzo.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained

@@ -5577,7 +5578,7 @@ R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 R:	Shannon Nelson <shannon.nelson@intel.com>
 R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
 R:	Don Skidmore <donald.c.skidmore@intel.com>
-R:	Matthew Vick <matthew.vick@intel.com>
+R:	Bruce Allan <bruce.w.allan@intel.com>
 R:	John Ronciak <john.ronciak@intel.com>
 R:	Mitch Williams <mitch.a.williams@intel.com>
 L:	intel-wired-lan@lists.osuosl.org

@@ -8286,7 +8287,7 @@ F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
-M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
+M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Arnaldo Carvalho de Melo <acme@kernel.org>
 L:	linux-kernel@vger.kernel.org

@@ -8379,6 +8380,14 @@ L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pinctrl/samsung/
 
+PIN CONTROLLER - SINGLE
+M:	Tony Lindgren <tony@atomide.com>
+M:	Haojian Zhuang <haojian.zhuang@linaro.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	drivers/pinctrl/pinctrl-single.c
+
 PIN CONTROLLER - ST SPEAR
 M:	Viresh Kumar <vireshk@kernel.org>
 L:	spear-devel@list.st.com

@@ -8945,6 +8954,13 @@ F:	drivers/rpmsg/
 F:	Documentation/rpmsg.txt
 F:	include/linux/rpmsg.h
 
+RENESAS ETHERNET DRIVERS
+R:	Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+L:	netdev@vger.kernel.org
+L:	linux-sh@vger.kernel.org
+F:	drivers/net/ethernet/renesas/
+F:	include/linux/sh_eth.h
+
 RESET CONTROLLER FRAMEWORK
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 S:	Maintained

Makefile (2 changed lines)

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*

			@ -445,6 +445,7 @@ config LINUX_LINK_BASE
 | 
			
		|||
	  However some customers have peripherals mapped at this addr, so
 | 
			
		||||
	  Linux needs to be scooted a bit.
 | 
			
		||||
	  If you don't know what the above means, leave this setting alone.
 | 
			
		||||
	  This needs to match memory start address specified in Device Tree
 | 
			
		||||
 | 
			
		||||
config HIGHMEM
 | 
			
		||||
	bool "High Memory Support"
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -81,7 +81,7 @@ endif
 | 
			
		|||
LIBGCC	:= $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 | 
			
		||||
 | 
			
		||||
# Modules with short calls might break for calls into builtin-kernel
 | 
			
		||||
KBUILD_CFLAGS_MODULE	+= -mlong-calls
 | 
			
		||||
KBUILD_CFLAGS_MODULE	+= -mlong-calls -mno-millicode
 | 
			
		||||
 | 
			
		||||
# Finally dump eveything into kernel build system
 | 
			
		||||
KBUILD_CFLAGS	+= $(cflags-y)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -46,6 +46,7 @@
 | 
			
		|||
			snps,pbl = < 32 >;
 | 
			
		||||
			clocks = <&apbclk>;
 | 
			
		||||
			clock-names = "stmmaceth";
 | 
			
		||||
			max-speed = <100>;
 | 
			
		||||
		};
 | 
			
		||||
 | 
			
		||||
		ehci@0x40000 {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -17,7 +17,8 @@
 | 
			
		|||
 | 
			
		||||
	memory {
 | 
			
		||||
		device_type = "memory";
 | 
			
		||||
		reg = <0x0 0x80000000 0x0 0x40000000	/* 1 GB low mem */
 | 
			
		||||
		/* CONFIG_LINUX_LINK_BASE needs to match low mem start */
 | 
			
		||||
		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MB low mem */
 | 
			
		||||
		       0x1 0x00000000 0x0 0x40000000>;	/* 1 GB highmem */
 | 
			
		||||
	};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -62,9 +62,7 @@ extern int ioc_exists;
 | 
			
		|||
#define ARC_REG_IC_IVIC		0x10
 | 
			
		||||
#define ARC_REG_IC_CTRL		0x11
 | 
			
		||||
#define ARC_REG_IC_IVIL		0x19
 | 
			
		||||
#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 | 
			
		||||
#define ARC_REG_IC_PTAG		0x1E
 | 
			
		||||
#endif
 | 
			
		||||
#define ARC_REG_IC_PTAG_HI	0x1F
 | 
			
		||||
 | 
			
		||||
/* Bit val in IC_CTRL */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -23,7 +23,7 @@
 | 
			
		|||
 * @dt_compat:		Array of device tree 'compatible' strings
 | 
			
		||||
 * 			(XXX: although only 1st entry is looked at)
 | 
			
		||||
 * @init_early:		Very early callback [called from setup_arch()]
 | 
			
		||||
 * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
 | 
			
		||||
 * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
 | 
			
		||||
 * 			[(M):init_IRQ(), (o):start_kernel_secondary()]
 | 
			
		||||
 * @init_machine:	arch initcall level callback (e.g. populate static
 | 
			
		||||
 * 			platform devices or parse Devicetree)
 | 
			
		||||
| 
						 | 
				
			
			@ -35,7 +35,7 @@ struct machine_desc {
 | 
			
		|||
	const char		**dt_compat;
 | 
			
		||||
	void			(*init_early)(void);
 | 
			
		||||
#ifdef CONFIG_SMP
 | 
			
		||||
	void			(*init_cpu_smp)(unsigned int);
 | 
			
		||||
	void			(*init_per_cpu)(unsigned int);
 | 
			
		||||
#endif
 | 
			
		||||
	void			(*init_machine)(void);
 | 
			
		||||
	void			(*init_late)(void);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 | 
			
		|||
 * @init_early_smp:	A SMP specific h/w block can init itself
 | 
			
		||||
 * 			Could be common across platforms so not covered by
 | 
			
		||||
 * 			mach_desc->init_early()
 | 
			
		||||
 * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
 | 
			
		||||
 * @init_per_cpu:	Called for each core so SMP h/w block driver can do
 | 
			
		||||
 * 			any needed setup per cpu (e.g. IPI request)
 | 
			
		||||
 * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
 | 
			
		||||
 * @ipi_send:		To send IPI to a @cpu
 | 
			
		||||
| 
						 | 
				
			
			@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 | 
			
		|||
struct plat_smp_ops {
 | 
			
		||||
	const char 	*info;
 | 
			
		||||
	void		(*init_early_smp)(void);
 | 
			
		||||
	void		(*init_irq_cpu)(int cpu);
 | 
			
		||||
	void		(*init_per_cpu)(int cpu);
 | 
			
		||||
	void		(*cpu_kick)(int cpu, unsigned long pc);
 | 
			
		||||
	void		(*ipi_send)(int cpu);
 | 
			
		||||
	void		(*ipi_clear)(int irq);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -112,7 +112,6 @@ struct unwind_frame_info {
 | 
			
		|||
 | 
			
		||||
extern int arc_unwind(struct unwind_frame_info *frame);
 | 
			
		||||
extern void arc_unwind_init(void);
 | 
			
		||||
extern void arc_unwind_setup(void);
 | 
			
		||||
extern void *unwind_add_table(struct module *module, const void *table_start,
 | 
			
		||||
			      unsigned long table_size);
 | 
			
		||||
extern void unwind_remove_table(void *handle, int init_only);
 | 
			
		||||
| 
						 | 
				
			
			@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
 | 
			
		|||
{
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline void arc_unwind_setup(void)
 | 
			
		||||
{
 | 
			
		||||
}
 | 
			
		||||
#define unwind_add_table(a, b, c)
 | 
			
		||||
#define unwind_remove_table(a, b)
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 | 
			
		|||
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
 | 
			
		||||
			 irq_hw_number_t hw)
 | 
			
		||||
{
 | 
			
		||||
	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
 | 
			
		||||
	/*
 | 
			
		||||
	 * core intc IRQs [16, 23]:
 | 
			
		||||
	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
 | 
			
		||||
	 */
 | 
			
		||||
	if (hw < 24) {
 | 
			
		||||
		/*
 | 
			
		||||
		 * A subsequent request_percpu_irq() fails if percpu_devid is
 | 
			
		||||
		 * not set. That in turns sets NOAUTOEN, meaning each core needs
 | 
			
		||||
		 * to call enable_percpu_irq()
 | 
			
		||||
		 */
 | 
			
		||||
		irq_set_percpu_devid(irq);
 | 
			
		||||
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
 | 
			
		||||
	else
 | 
			
		||||
	} else {
 | 
			
		||||
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -29,11 +29,11 @@ void __init init_IRQ(void)
 | 
			
		|||
 | 
			
		||||
#ifdef CONFIG_SMP
 | 
			
		||||
	/* a SMP H/w block could do IPI IRQ request here */
 | 
			
		||||
	if (plat_smp_ops.init_irq_cpu)
 | 
			
		||||
		plat_smp_ops.init_irq_cpu(smp_processor_id());
 | 
			
		||||
	if (plat_smp_ops.init_per_cpu)
 | 
			
		||||
		plat_smp_ops.init_per_cpu(smp_processor_id());
 | 
			
		||||
 | 
			
		||||
	if (machine_desc->init_cpu_smp)
 | 
			
		||||
		machine_desc->init_cpu_smp(smp_processor_id());
 | 
			
		||||
	if (machine_desc->init_per_cpu)
 | 
			
		||||
		machine_desc->init_per_cpu(smp_processor_id());
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 | 
			
		|||
	set_irq_regs(old_regs);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * API called for requesting percpu interrupts - called by each CPU
 | 
			
		||||
 *  - For boot CPU, actually request the IRQ with genirq core + enables
 | 
			
		||||
 *  - For subsequent callers only enable called locally
 | 
			
		||||
 *
 | 
			
		||||
 * Relies on being called by boot cpu first (i.e. request called ahead) of
 | 
			
		||||
 * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
 | 
			
		||||
 * which are guaranteed to be setup on boot core first.
 | 
			
		||||
 * Late probed peripherals such as perf can't use this as there no guarantee
 | 
			
		||||
 * of being called on boot CPU first.
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
void arc_request_percpu_irq(int irq, int cpu,
 | 
			
		||||
                            irqreturn_t (*isr)(int irq, void *dev),
 | 
			
		||||
                            const char *irq_nm,
 | 
			
		||||
| 
						 | 
				
			
			@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
 | 
			
		|||
	if (!cpu) {
 | 
			
		||||
		int rc;
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_ISA_ARCOMPACT
 | 
			
		||||
		/*
 | 
			
		||||
		 * These 2 calls are essential to making percpu IRQ APIs work
 | 
			
		||||
		 * Ideally these details could be hidden in irq chip map function
 | 
			
		||||
		 * but the issue is IPIs IRQs being static (non-DT) and platform
 | 
			
		||||
		 * specific, so we can't identify them there.
 | 
			
		||||
		 * A subsequent request_percpu_irq() fails if percpu_devid is
 | 
			
		||||
		 * not set. That in turns sets NOAUTOEN, meaning each core needs
 | 
			
		||||
		 * to call enable_percpu_irq()
 | 
			
		||||
		 *
 | 
			
		||||
		 * For ARCv2, this is done in irq map function since we know
 | 
			
		||||
		 * which irqs are strictly per cpu
 | 
			
		||||
		 */
 | 
			
		||||
		irq_set_percpu_devid(irq);
 | 
			
		||||
		irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
 | 
			
		||||
		if (rc)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 | 
			
		|||
struct plat_smp_ops plat_smp_ops = {
 | 
			
		||||
	.info		= smp_cpuinfo_buf,
 | 
			
		||||
	.init_early_smp	= mcip_probe_n_setup,
 | 
			
		||||
	.init_irq_cpu	= mcip_setup_per_cpu,
 | 
			
		||||
	.init_per_cpu	= mcip_setup_per_cpu,
 | 
			
		||||
	.ipi_send	= mcip_ipi_send,
 | 
			
		||||
	.ipi_clear	= mcip_ipi_clear,
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 | 
			
		|||
 | 
			
		||||
#endif /* CONFIG_ISA_ARCV2 */
 | 
			
		||||
 | 
			
		||||
void arc_cpu_pmu_irq_init(void)
 | 
			
		||||
static void arc_cpu_pmu_irq_init(void *data)
 | 
			
		||||
{
 | 
			
		||||
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
 | 
			
		||||
	int irq = *(int *)data;
 | 
			
		||||
 | 
			
		||||
	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
 | 
			
		||||
			       "ARC perf counters", pmu_cpu);
 | 
			
		||||
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 | 
			
		||||
 | 
			
		||||
	/* Clear all pending interrupt flags */
 | 
			
		||||
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 | 
			
		||||
| 
						 | 
				
			
			@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 | 
			
		|||
 | 
			
		||||
	if (has_interrupts) {
 | 
			
		||||
		int irq = platform_get_irq(pdev, 0);
 | 
			
		||||
		unsigned long flags;
 | 
			
		||||
 | 
			
		||||
		if (irq < 0) {
 | 
			
		||||
			pr_err("Cannot get IRQ number for the platform\n");
 | 
			
		||||
| 
						 | 
				
			
			@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 | 
			
		|||
 | 
			
		||||
		arc_pmu->irq = irq;
 | 
			
		||||
 | 
			
		||||
		/*
 | 
			
		||||
		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
 | 
			
		||||
		 * their respective local PMU.
 | 
			
		||||
		 * However we use opencoded on_each_cpu() to ensure it is called
 | 
			
		||||
		 * on core0 first, so that arc_request_percpu_irq() sets up
 | 
			
		||||
		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
 | 
			
		||||
		 * perf IRQ on non master cores.
 | 
			
		||||
		 * see arc_request_percpu_irq()
 | 
			
		||||
		 */
 | 
			
		||||
		preempt_disable();
 | 
			
		||||
		local_irq_save(flags);
 | 
			
		||||
		arc_cpu_pmu_irq_init();
 | 
			
		||||
		local_irq_restore(flags);
 | 
			
		||||
		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
 | 
			
		||||
		preempt_enable();
 | 
			
		||||
		/* intc map function ensures irq_set_percpu_devid() called */
 | 
			
		||||
		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
 | 
			
		||||
				   this_cpu_ptr(&arc_pmu_cpu));
 | 
			
		||||
 | 
			
		||||
		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
 | 
			
		||||
 | 
			
		||||
		/* Clean all pending interrupt flags */
 | 
			
		||||
		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 | 
			
		||||
	} else
 | 
			
		||||
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 | 
			
		|||
#endif
 | 
			
		||||
 | 
			
		||||
	arc_unwind_init();
 | 
			
		||||
	arc_unwind_setup();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static int __init customize_machine(void)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -132,11 +132,11 @@ void start_kernel_secondary(void)
 | 
			
		|||
	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 | 
			
		||||
 | 
			
		||||
	/* Some SMP H/w setup - for each cpu */
 | 
			
		||||
	if (plat_smp_ops.init_irq_cpu)
 | 
			
		||||
		plat_smp_ops.init_irq_cpu(cpu);
 | 
			
		||||
	if (plat_smp_ops.init_per_cpu)
 | 
			
		||||
		plat_smp_ops.init_per_cpu(cpu);
 | 
			
		||||
 | 
			
		||||
	if (machine_desc->init_cpu_smp)
 | 
			
		||||
		machine_desc->init_cpu_smp(cpu);
 | 
			
		||||
	if (machine_desc->init_per_cpu)
 | 
			
		||||
		machine_desc->init_per_cpu(cpu);
 | 
			
		||||
 | 
			
		||||
	arc_local_timer_setup();
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 | 
			
		|||
 | 
			
		||||
static unsigned long read_pointer(const u8 **pLoc,
 | 
			
		||||
				  const void *end, signed ptrType);
 | 
			
		||||
static void init_unwind_hdr(struct unwind_table *table,
 | 
			
		||||
			    void *(*alloc) (unsigned long));
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * wrappers for header alloc (vs. calling one vs. other at call site)
 | 
			
		||||
 * to elide section mismatches warnings
 | 
			
		||||
 */
 | 
			
		||||
static void *__init unw_hdr_alloc_early(unsigned long sz)
 | 
			
		||||
{
 | 
			
		||||
	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
 | 
			
		||||
				       MAX_DMA_ADDRESS);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void *unw_hdr_alloc(unsigned long sz)
 | 
			
		||||
{
 | 
			
		||||
	return kmalloc(sz, GFP_KERNEL);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void init_unwind_table(struct unwind_table *table, const char *name,
 | 
			
		||||
			      const void *core_start, unsigned long core_size,
 | 
			
		||||
| 
						 | 
				
			
			@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
 | 
			
		|||
			  __start_unwind, __end_unwind - __start_unwind,
 | 
			
		||||
			  NULL, 0);
 | 
			
		||||
	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
 | 
			
		||||
 | 
			
		||||
	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static const u32 bad_cie, not_fde;
 | 
			
		||||
| 
						 | 
				
			
			@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 | 
			
		|||
	e2->fde = v;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void __init setup_unwind_table(struct unwind_table *table,
 | 
			
		||||
				      void *(*alloc) (unsigned long))
 | 
			
		||||
static void init_unwind_hdr(struct unwind_table *table,
 | 
			
		||||
			    void *(*alloc) (unsigned long))
 | 
			
		||||
{
 | 
			
		||||
	const u8 *ptr;
 | 
			
		||||
	unsigned long tableSize = table->size, hdrSize;
 | 
			
		||||
| 
						 | 
				
			
			@ -277,10 +296,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 | 
			
		|||
		if (cie == ¬_fde)
 | 
			
		||||
			continue;
 | 
			
		||||
		if (cie == NULL || cie == &bad_cie)
 | 
			
		||||
			return;
 | 
			
		||||
			goto ret_err;
 | 
			
		||||
		ptrType = fde_pointer_type(cie);
 | 
			
		||||
		if (ptrType < 0)
 | 
			
		||||
			return;
 | 
			
		||||
			goto ret_err;
 | 
			
		||||
 | 
			
		||||
		ptr = (const u8 *)(fde + 2);
 | 
			
		||||
		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
 | 
			
		||||
| 
						 | 
				
			
			@ -296,13 +315,15 @@ static void __init setup_unwind_table(struct unwind_table *table,
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	if (tableSize || !n)
 | 
			
		||||
		return;
 | 
			
		||||
		goto ret_err;
 | 
			
		||||
 | 
			
		||||
	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 | 
			
		||||
	    + 2 * n * sizeof(unsigned long);
 | 
			
		||||
 | 
			
		||||
	header = alloc(hdrSize);
 | 
			
		||||
	if (!header)
 | 
			
		||||
		return;
 | 
			
		||||
		goto ret_err;
 | 
			
		||||
 | 
			
		||||
	header->version = 1;
 | 
			
		||||
	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
 | 
			
		||||
	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
 | 
			
		||||
| 
						 | 
				
			
			@ -340,18 +361,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 | 
			
		|||
	table->hdrsz = hdrSize;
 | 
			
		||||
	smp_wmb();
 | 
			
		||||
	table->header = (const void *)header;
 | 
			
		||||
}
 | 
			
		||||
	return;
 | 
			
		||||
 | 
			
		||||
static void *__init balloc(unsigned long sz)
 | 
			
		||||
{
 | 
			
		||||
	return __alloc_bootmem_nopanic(sz,
 | 
			
		||||
				       sizeof(unsigned int),
 | 
			
		||||
				       __pa(MAX_DMA_ADDRESS));
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void __init arc_unwind_setup(void)
 | 
			
		||||
{
 | 
			
		||||
	setup_unwind_table(&root_table, balloc);
 | 
			
		||||
ret_err:
 | 
			
		||||
	panic("Attention !!! Dwarf FDE parsing errors\n");;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_MODULES
 | 
			
		||||
| 
						 | 
				
			
			@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 | 
			
		|||
			  table_start, table_size,
 | 
			
		||||
			  NULL, 0);
 | 
			
		||||
 | 
			
		||||
	init_unwind_hdr(table, unw_hdr_alloc);
 | 
			
		||||
 | 
			
		||||
#ifdef UNWIND_DEBUG
 | 
			
		||||
	unw_debug("Table added for [%s] %lx %lx\n",
 | 
			
		||||
		module->name, table->core.pc, table->core.range);
 | 
			
		||||
| 
						 | 
				
			
			@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
 | 
			
		|||
	info.init_only = init_only;
 | 
			
		||||
 | 
			
		||||
	unlink_table(&info); /* XXX: SMP */
 | 
			
		||||
	kfree(table->header);
 | 
			
		||||
	kfree(table);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -588,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
 | 
			
		|||
	const u8 *ptr = (const u8 *)(cie + 2);
 | 
			
		||||
	unsigned version = *ptr;
 | 
			
		||||
 | 
			
		||||
	if (version != 1)
 | 
			
		||||
		return -1;	/* unsupported */
 | 
			
		||||
 | 
			
		||||
	if (*++ptr) {
 | 
			
		||||
		const char *aug;
 | 
			
		||||
		const u8 *end = (const u8 *)(cie + 1) + *cie;
 | 
			
		||||
| 
						 | 
				
			
			@ -1002,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
 | 
			
		|||
		ptr = (const u8 *)(cie + 2);
 | 
			
		||||
		end = (const u8 *)(cie + 1) + *cie;
 | 
			
		||||
		frame->call_frame = 1;
 | 
			
		||||
		if ((state.version = *ptr) != 1)
 | 
			
		||||
			cie = NULL;	/* unsupported version */
 | 
			
		||||
		else if (*++ptr) {
 | 
			
		||||
		if (*++ptr) {
 | 
			
		||||
			/* check if augmentation size is first (thus present) */
 | 
			
		||||
			if (*ptr == 'z') {
 | 
			
		||||
				while (++ptr < end && *ptr) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv)
 | 
			
		|||
}
 | 
			
		||||
EXPORT_SYMBOL(__kunmap_atomic);
 | 
			
		||||
 | 
			
		||||
noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
 | 
			
		||||
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 | 
			
		||||
{
 | 
			
		||||
	pgd_t *pgd_k;
 | 
			
		||||
	pud_t *pud_k;
 | 
			
		||||
| 
						 | 
				
			
			@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
 | 
			
		|||
	return pte_k;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void kmap_init(void)
 | 
			
		||||
void __init kmap_init(void)
 | 
			
		||||
{
 | 
			
		||||
	/* Due to recursive include hell, we can't do this in processor.h */
 | 
			
		||||
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 | 
			
		|||
	int in_use = 0;
 | 
			
		||||
 | 
			
		||||
	if (!low_mem_sz) {
 | 
			
		||||
		BUG_ON(base != low_mem_start);
 | 
			
		||||
		if (base != low_mem_start)
 | 
			
		||||
			panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
 | 
			
		||||
 | 
			
		||||
		low_mem_sz = size;
 | 
			
		||||
		in_use = 1;
 | 
			
		||||
	} else {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -74,7 +74,7 @@
 | 
			
		|||
		reg = <0x48240200 0x100>;
 | 
			
		||||
		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
		interrupt-parent = <&gic>;
 | 
			
		||||
		clocks = <&dpll_mpu_m2_ck>;
 | 
			
		||||
		clocks = <&mpu_periphclk>;
 | 
			
		||||
	};
 | 
			
		||||
 | 
			
		||||
	local_timer: timer@48240600 {
 | 
			
		||||
| 
						 | 
				
			
			@ -82,7 +82,7 @@
 | 
			
		|||
		reg = <0x48240600 0x100>;
 | 
			
		||||
		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
		interrupt-parent = <&gic>;
 | 
			
		||||
		clocks = <&dpll_mpu_m2_ck>;
 | 
			
		||||
		clocks = <&mpu_periphclk>;
 | 
			
		||||
	};
 | 
			
		||||
 | 
			
		||||
	l2-cache-controller@48242000 {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -259,6 +259,14 @@
 | 
			
		|||
		ti,invert-autoidle-bit;
 | 
			
		||||
	};
 | 
			
		||||
 | 
			
		||||
	mpu_periphclk: mpu_periphclk {
 | 
			
		||||
		#clock-cells = <0>;
 | 
			
		||||
		compatible = "fixed-factor-clock";
 | 
			
		||||
		clocks = <&dpll_mpu_m2_ck>;
 | 
			
		||||
		clock-mult = <1>;
 | 
			
		||||
		clock-div = <2>;
 | 
			
		||||
	};
 | 
			
		||||
 | 
			
		||||
	dpll_ddr_ck: dpll_ddr_ck {
 | 
			
		||||
		#clock-cells = <0>;
 | 
			
		||||
		compatible = "ti,am3-dpll-clock";
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -184,6 +184,7 @@
 | 
			
		|||
							regulator-name = "VDD_SDHC_1V8";
 | 
			
		||||
							regulator-min-microvolt = <1800000>;
 | 
			
		||||
							regulator-max-microvolt = <1800000>;
 | 
			
		||||
							regulator-always-on;
 | 
			
		||||
						};
 | 
			
		||||
					};
 | 
			
		||||
				};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -118,7 +118,8 @@
 | 
			
		|||
		sdhci0: sdhci@ab0000 {
 | 
			
		||||
			compatible = "mrvl,pxav3-mmc";
 | 
			
		||||
			reg = <0xab0000 0x200>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_SDIO1XIN>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
 | 
			
		||||
			clock-names = "io", "core";
 | 
			
		||||
			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
			status = "disabled";
 | 
			
		||||
		};
 | 
			
		||||
| 
						 | 
				
			
			@ -126,7 +127,8 @@
 | 
			
		|||
		sdhci1: sdhci@ab0800 {
 | 
			
		||||
			compatible = "mrvl,pxav3-mmc";
 | 
			
		||||
			reg = <0xab0800 0x200>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_SDIO1XIN>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
 | 
			
		||||
			clock-names = "io", "core";
 | 
			
		||||
			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
			status = "disabled";
 | 
			
		||||
		};
 | 
			
		||||
| 
						 | 
				
			
			@ -135,7 +137,7 @@
 | 
			
		|||
			compatible = "mrvl,pxav3-mmc";
 | 
			
		||||
			reg = <0xab1000 0x200>;
 | 
			
		||||
			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
 | 
			
		||||
			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
 | 
			
		||||
			clock-names = "io", "core";
 | 
			
		||||
			status = "disabled";
 | 
			
		||||
		};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -218,6 +218,7 @@
 | 
			
		|||
			reg = <0x480c8000 0x2000>;
 | 
			
		||||
			interrupts = <77>;
 | 
			
		||||
			ti,hwmods = "mailbox";
 | 
			
		||||
			#mbox-cells = <1>;
 | 
			
		||||
			ti,mbox-num-users = <4>;
 | 
			
		||||
			ti,mbox-num-fifos = <12>;
 | 
			
		||||
			mbox_dsp: mbox_dsp {
 | 
			
		||||
| 
						 | 
				
			
			@ -279,8 +280,11 @@
 | 
			
		|||
			ti,spi-num-cs = <4>;
 | 
			
		||||
			ti,hwmods = "mcspi1";
 | 
			
		||||
			dmas = <&edma 16 &edma 17
 | 
			
		||||
				&edma 18 &edma 19>;
 | 
			
		||||
			dma-names = "tx0", "rx0", "tx1", "rx1";
 | 
			
		||||
				&edma 18 &edma 19
 | 
			
		||||
				&edma 20 &edma 21
 | 
			
		||||
				&edma 22 &edma 23>;
 | 
			
		||||
			dma-names = "tx0", "rx0", "tx1", "rx1",
 | 
			
		||||
				    "tx2", "rx2", "tx3", "rx3";
 | 
			
		||||
		};
 | 
			
		||||
 | 
			
		||||
		mmc1: mmc@48060000 {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -154,7 +154,7 @@
 | 
			
		|||
&fec {
 | 
			
		||||
	pinctrl-names = "default";
 | 
			
		||||
	pinctrl-0 = <&pinctrl_enet>;
 | 
			
		||||
	phy-mode = "rgmii";
 | 
			
		||||
	phy-mode = "rgmii-id";
 | 
			
		||||
	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
 | 
			
		||||
	status = "okay";
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -94,7 +94,7 @@
 | 
			
		|||
&fec {
 | 
			
		||||
	pinctrl-names = "default";
 | 
			
		||||
	pinctrl-0 = <&pinctrl_enet>;
 | 
			
		||||
	phy-mode = "rgmii";
 | 
			
		||||
	phy-mode = "rgmii-id";
 | 
			
		||||
	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 | 
			
		||||
	status = "okay";
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -154,7 +154,7 @@
 | 
			
		|||
&fec {
 | 
			
		||||
	pinctrl-names = "default";
 | 
			
		||||
	pinctrl-0 = <&pinctrl_enet>;
 | 
			
		||||
	phy-mode = "rgmii";
 | 
			
		||||
	phy-mode = "rgmii-id";
 | 
			
		||||
	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 | 
			
		||||
	status = "okay";
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -155,7 +155,7 @@
 | 
			
		|||
&fec {
 | 
			
		||||
	pinctrl-names = "default";
 | 
			
		||||
	pinctrl-0 = <&pinctrl_enet>;
 | 
			
		||||
	phy-mode = "rgmii";
 | 
			
		||||
	phy-mode = "rgmii-id";
 | 
			
		||||
	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 | 
			
		||||
	status = "okay";
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -145,7 +145,7 @@
 | 
			
		|||
&fec {
 | 
			
		||||
	pinctrl-names = "default";
 | 
			
		||||
	pinctrl-0 = <&pinctrl_enet>;
 | 
			
		||||
	phy-mode = "rgmii";
 | 
			
		||||
	phy-mode = "rgmii-id";
 | 
			
		||||
	phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
 | 
			
		||||
	status = "okay";
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -113,14 +113,14 @@
 | 
			
		|||
&clks {
 | 
			
		||||
	assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
 | 
			
		||||
			  <&clks IMX6QDL_PLL4_BYPASS>,
 | 
			
		||||
			  <&clks IMX6QDL_CLK_PLL4_POST_DIV>,
 | 
			
		||||
			  <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
 | 
			
		||||
			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 | 
			
		||||
			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
 | 
			
		||||
			  <&clks IMX6QDL_CLK_PLL4_POST_DIV>;
 | 
			
		||||
	assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
 | 
			
		||||
				 <&clks IMX6QDL_PLL4_BYPASS_SRC>,
 | 
			
		||||
				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
 | 
			
		||||
				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
 | 
			
		||||
	assigned-clock-rates = <0>, <0>, <24576000>;
 | 
			
		||||
	assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
&ecspi1 {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -189,3 +189,7 @@
 | 
			
		|||
	};
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
&uart3 {
 | 
			
		||||
	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
 | 
			
		||||
			       &omap4_pmx_core OMAP4_UART3_RX>;
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -83,6 +83,7 @@
 | 
			
		|||
		reg = <0x5d>;
 | 
			
		||||
		interrupt-parent = <&pio>;
 | 
			
		||||
		interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
 | 
			
		||||
		touchscreen-swapped-x-y;
 | 
			
		||||
	};
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -399,7 +399,7 @@
 | 
			
		|||
 | 
			
		||||
	/* CPU DFLL clock */
 | 
			
		||||
	clock@0,70110000 {
 | 
			
		||||
		status = "okay";
 | 
			
		||||
		status = "disabled";
 | 
			
		||||
		vdd-cpu-supply = <&vdd_cpu>;
 | 
			
		||||
		nvidia,i2c-fs-rate = <400000>;
 | 
			
		||||
	};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -18,8 +18,3 @@
 | 
			
		|||
		reg = <0x80000000 0x10000000>;
 | 
			
		||||
	};
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
&L2 {
 | 
			
		||||
	arm,data-latency = <2 1 2>;
 | 
			
		||||
	arm,tag-latency = <3 2 3>;
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -19,7 +19,7 @@
 | 
			
		|||
		reg = <0x40006000 0x1000>;
 | 
			
		||||
		cache-unified;
 | 
			
		||||
		cache-level = <2>;
 | 
			
		||||
		arm,data-latency = <1 1 1>;
 | 
			
		||||
		arm,data-latency = <3 3 3>;
 | 
			
		||||
		arm,tag-latency = <2 2 2>;
 | 
			
		||||
	};
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -178,8 +178,10 @@
 | 
			
		|||
				compatible = "fsl,vf610-sai";
 | 
			
		||||
				reg = <0x40031000 0x1000>;
 | 
			
		||||
				interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
 | 
			
		||||
				clocks = <&clks VF610_CLK_SAI2>;
 | 
			
		||||
				clock-names = "sai";
 | 
			
		||||
				clocks = <&clks VF610_CLK_SAI2>,
 | 
			
		||||
					<&clks VF610_CLK_SAI2_DIV>,
 | 
			
		||||
					<&clks 0>, <&clks 0>;
 | 
			
		||||
				clock-names = "bus", "mclk1", "mclk2", "mclk3";
 | 
			
		||||
				dma-names = "tx", "rx";
 | 
			
		||||
				dmas = <&edma0 0 21>,
 | 
			
		||||
					<&edma0 0 20>;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -21,6 +21,7 @@
 | 
			
		|||
#ifndef __ASSEMBLY__
 | 
			
		||||
 | 
			
		||||
#include <linux/io.h>
 | 
			
		||||
#include <asm/barrier.h>
 | 
			
		||||
 | 
			
		||||
#define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
 | 
			
		||||
#define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 | 
			
		|||
static inline unsigned long __must_check
 | 
			
		||||
__copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
#ifndef CONFIG_UACCESS_WITH_MEMCPY
 | 
			
		||||
	unsigned int __ua_flags = uaccess_save_and_enable();
 | 
			
		||||
	n = arm_copy_to_user(to, from, n);
 | 
			
		||||
	uaccess_restore(__ua_flags);
 | 
			
		||||
	return n;
 | 
			
		||||
#else
 | 
			
		||||
	return arm_copy_to_user(to, from, n);
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
extern unsigned long __must_check
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 | 
			
		|||
{
 | 
			
		||||
	unsigned long flags;
 | 
			
		||||
	char buf[64];
 | 
			
		||||
#ifndef CONFIG_CPU_V7M
 | 
			
		||||
	unsigned int domain;
 | 
			
		||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 | 
			
		||||
	/*
 | 
			
		||||
	 * Get the domain register for the parent context. In user
 | 
			
		||||
	 * mode, we don't save the DACR, so lets use what it should
 | 
			
		||||
	 * be. For other modes, we place it after the pt_regs struct.
 | 
			
		||||
	 */
 | 
			
		||||
	if (user_mode(regs))
 | 
			
		||||
		domain = DACR_UACCESS_ENABLE;
 | 
			
		||||
	else
 | 
			
		||||
		domain = *(unsigned int *)(regs + 1);
 | 
			
		||||
#else
 | 
			
		||||
	domain = get_domain();
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
	show_regs_print_info(KERN_DEFAULT);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
 | 
			
		|||
 | 
			
		||||
#ifndef CONFIG_CPU_V7M
 | 
			
		||||
	{
 | 
			
		||||
		unsigned int domain = get_domain();
 | 
			
		||||
		const char *segment;
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 | 
			
		||||
		/*
 | 
			
		||||
		 * Get the domain register for the parent context. In user
 | 
			
		||||
		 * mode, we don't save the DACR, so lets use what it should
 | 
			
		||||
		 * be. For other modes, we place it after the pt_regs struct.
 | 
			
		||||
		 */
 | 
			
		||||
		if (user_mode(regs))
 | 
			
		||||
			domain = DACR_UACCESS_ENABLE;
 | 
			
		||||
		else
 | 
			
		||||
			domain = *(unsigned int *)(regs + 1);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
		if ((domain & domain_mask(DOMAIN_USER)) ==
 | 
			
		||||
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 | 
			
		||||
			segment = "none";
 | 
			
		||||
| 
						 | 
				
			
			@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
 | 
			
		|||
		buf[0] = '\0';
 | 
			
		||||
#ifdef CONFIG_CPU_CP15_MMU
 | 
			
		||||
		{
 | 
			
		||||
			unsigned int transbase, dac = get_domain();
 | 
			
		||||
			unsigned int transbase;
 | 
			
		||||
			asm("mrc p15, 0, %0, c2, c0\n\t"
 | 
			
		||||
			    : "=r" (transbase));
 | 
			
		||||
			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 | 
			
		||||
			  	transbase, dac);
 | 
			
		||||
				transbase, domain);
 | 
			
		||||
		}
 | 
			
		||||
#endif
 | 
			
		||||
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -36,10 +36,10 @@
 | 
			
		|||
 */
 | 
			
		||||
#define __user_swpX_asm(data, addr, res, temp, B)		\
 | 
			
		||||
	__asm__ __volatile__(					\
 | 
			
		||||
	"	mov		%2, %1\n"			\
 | 
			
		||||
	"0:	ldrex"B"	%1, [%3]\n"			\
 | 
			
		||||
	"1:	strex"B"	%0, %2, [%3]\n"			\
 | 
			
		||||
	"0:	ldrex"B"	%2, [%3]\n"			\
 | 
			
		||||
	"1:	strex"B"	%0, %1, [%3]\n"			\
 | 
			
		||||
	"	cmp		%0, #0\n"			\
 | 
			
		||||
	"	moveq		%1, %2\n"			\
 | 
			
		||||
	"	movne		%0, %4\n"			\
 | 
			
		||||
	"2:\n"							\
 | 
			
		||||
	"	.section	 .text.fixup,\"ax\"\n"		\
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 | 
			
		|||
static unsigned long noinline
 | 
			
		||||
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long ua_flags;
 | 
			
		||||
	int atomic;
 | 
			
		||||
 | 
			
		||||
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 | 
			
		||||
| 
						 | 
				
			
			@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 | 
			
		|||
		if (tocopy > n)
 | 
			
		||||
			tocopy = n;
 | 
			
		||||
 | 
			
		||||
		ua_flags = uaccess_save_and_enable();
 | 
			
		||||
		memcpy((void *)to, from, tocopy);
 | 
			
		||||
		uaccess_restore(ua_flags);
 | 
			
		||||
		to += tocopy;
 | 
			
		||||
		from += tocopy;
 | 
			
		||||
		n -= tocopy;
 | 
			
		||||
| 
						 | 
				
			
			@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 | 
			
		|||
	 * With frame pointer disabled, tail call optimization kicks in
 | 
			
		||||
	 * as well making this test almost invisible.
 | 
			
		||||
	 */
 | 
			
		||||
	if (n < 64)
 | 
			
		||||
		return __copy_to_user_std(to, from, n);
 | 
			
		||||
	return __copy_to_user_memcpy(to, from, n);
 | 
			
		||||
	if (n < 64) {
 | 
			
		||||
		unsigned long ua_flags = uaccess_save_and_enable();
 | 
			
		||||
		n = __copy_to_user_std(to, from, n);
 | 
			
		||||
		uaccess_restore(ua_flags);
 | 
			
		||||
	} else {
 | 
			
		||||
		n = __copy_to_user_memcpy(to, from, n);
 | 
			
		||||
	}
 | 
			
		||||
	return n;
 | 
			
		||||
}
 | 
			
		||||
	
 | 
			
		||||
static unsigned long noinline
 | 
			
		||||
__clear_user_memset(void __user *addr, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long ua_flags;
 | 
			
		||||
 | 
			
		||||
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 | 
			
		||||
		memset((void *)addr, 0, n);
 | 
			
		||||
		return 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
 | 
			
		|||
		if (tocopy > n)
 | 
			
		||||
			tocopy = n;
 | 
			
		||||
 | 
			
		||||
		ua_flags = uaccess_save_and_enable();
 | 
			
		||||
		memset((void *)addr, 0, tocopy);
 | 
			
		||||
		uaccess_restore(ua_flags);
 | 
			
		||||
		addr += tocopy;
 | 
			
		||||
		n -= tocopy;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -193,9 +205,14 @@ out:
 | 
			
		|||
unsigned long arm_clear_user(void __user *addr, unsigned long n)
 | 
			
		||||
{
 | 
			
		||||
	/* See rational for this in __copy_to_user() above. */
 | 
			
		||||
	if (n < 64)
 | 
			
		||||
		return __clear_user_std(addr, n);
 | 
			
		||||
	return __clear_user_memset(addr, n);
 | 
			
		||||
	if (n < 64) {
 | 
			
		||||
		unsigned long ua_flags = uaccess_save_and_enable();
 | 
			
		||||
		n = __clear_user_std(addr, n);
 | 
			
		||||
		uaccess_restore(ua_flags);
 | 
			
		||||
	} else {
 | 
			
		||||
		n = __clear_user_memset(addr, n);
 | 
			
		||||
	}
 | 
			
		||||
	return n;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#if 0
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -4,7 +4,6 @@ menuconfig ARCH_AT91
 | 
			
		|||
	select ARCH_REQUIRE_GPIOLIB
 | 
			
		||||
	select COMMON_CLK_AT91
 | 
			
		||||
	select PINCTRL
 | 
			
		||||
	select PINCTRL_AT91
 | 
			
		||||
	select SOC_BUS
 | 
			
		||||
 | 
			
		||||
if ARCH_AT91
 | 
			
		||||
| 
						 | 
				
			
			@ -17,6 +16,7 @@ config SOC_SAMA5D2
 | 
			
		|||
	select HAVE_AT91_USB_CLK
 | 
			
		||||
	select HAVE_AT91_H32MX
 | 
			
		||||
	select HAVE_AT91_GENERATED_CLK
 | 
			
		||||
	select PINCTRL_AT91PIO4
 | 
			
		||||
	help
 | 
			
		||||
	  Select this if ou are using one of Atmel's SAMA5D2 family SoC.
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -27,6 +27,7 @@ config SOC_SAMA5D3
 | 
			
		|||
	select HAVE_AT91_UTMI
 | 
			
		||||
	select HAVE_AT91_SMD
 | 
			
		||||
	select HAVE_AT91_USB_CLK
 | 
			
		||||
	select PINCTRL_AT91
 | 
			
		||||
	help
 | 
			
		||||
	  Select this if you are using one of Atmel's SAMA5D3 family SoC.
 | 
			
		||||
	  This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
 | 
			
		||||
| 
						 | 
				
			
			@ -40,6 +41,7 @@ config SOC_SAMA5D4
 | 
			
		|||
	select HAVE_AT91_SMD
 | 
			
		||||
	select HAVE_AT91_USB_CLK
 | 
			
		||||
	select HAVE_AT91_H32MX
 | 
			
		||||
	select PINCTRL_AT91
 | 
			
		||||
	help
 | 
			
		||||
	  Select this if you are using one of Atmel's SAMA5D4 family SoC.
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -50,6 +52,7 @@ config SOC_AT91RM9200
 | 
			
		|||
	select CPU_ARM920T
 | 
			
		||||
	select HAVE_AT91_USB_CLK
 | 
			
		||||
	select MIGHT_HAVE_PCI
 | 
			
		||||
	select PINCTRL_AT91
 | 
			
		||||
	select SOC_SAM_V4_V5
 | 
			
		||||
	select SRAM if PM
 | 
			
		||||
	help
 | 
			
		||||
| 
						 | 
				
			
			@ -65,6 +68,7 @@ config SOC_AT91SAM9
 | 
			
		|||
	select HAVE_AT91_UTMI
 | 
			
		||||
	select HAVE_FB_ATMEL
 | 
			
		||||
	select MEMORY
 | 
			
		||||
	select PINCTRL_AT91
 | 
			
		||||
	select SOC_SAM_V4_V5
 | 
			
		||||
	select SRAM if PM
 | 
			
		||||
	help
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -41,8 +41,10 @@
 | 
			
		|||
 * implementation should be moved down into the pinctrl driver and get
 | 
			
		||||
 * called as part of the generic suspend/resume path.
 | 
			
		||||
 */
 | 
			
		||||
#ifdef CONFIG_PINCTRL_AT91
 | 
			
		||||
extern void at91_pinctrl_gpio_suspend(void);
 | 
			
		||||
extern void at91_pinctrl_gpio_resume(void);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static struct {
 | 
			
		||||
	unsigned long uhp_udp_mask;
 | 
			
		||||
| 
						 | 
				
			
			@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
 | 
			
		|||
 | 
			
		||||
static int at91_pm_enter(suspend_state_t state)
 | 
			
		||||
{
 | 
			
		||||
#ifdef CONFIG_PINCTRL_AT91
 | 
			
		||||
	at91_pinctrl_gpio_suspend();
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
	switch (state) {
 | 
			
		||||
	/*
 | 
			
		||||
	 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
 | 
			
		||||
| 
						 | 
				
			
			@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
 | 
			
		|||
error:
 | 
			
		||||
	target_state = PM_SUSPEND_ON;
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_PINCTRL_AT91
 | 
			
		||||
	at91_pinctrl_gpio_resume();
 | 
			
		||||
#endif
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
 | 
			
		|||
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
 | 
			
		||||
{
 | 
			
		||||
	unsigned int i;
 | 
			
		||||
	const struct exynos_pmu_data *pmu_data;
 | 
			
		||||
 | 
			
		||||
	const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
 | 
			
		||||
	if (!pmu_context)
 | 
			
		||||
		return;
 | 
			
		||||
 | 
			
		||||
	pmu_data = pmu_context->pmu_data;
 | 
			
		||||
 | 
			
		||||
	if (pmu_data->powerdown_conf)
 | 
			
		||||
		pmu_data->powerdown_conf(mode);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
 | 
			
		|||
		writel(*vaddr++, bus_addr);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline unsigned char __indirect_readb(const volatile void __iomem *p)
 | 
			
		||||
static inline u8 __indirect_readb(const volatile void __iomem *p)
 | 
			
		||||
{
 | 
			
		||||
	u32 addr = (u32)p;
 | 
			
		||||
	u32 n, byte_enables, data;
 | 
			
		||||
| 
						 | 
				
			
			@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
 | 
			
		|||
		*vaddr++ = readb(bus_addr);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline unsigned short __indirect_readw(const volatile void __iomem *p)
 | 
			
		||||
static inline u16 __indirect_readw(const volatile void __iomem *p)
 | 
			
		||||
{
 | 
			
		||||
	u32 addr = (u32)p;
 | 
			
		||||
	u32 n, byte_enables, data;
 | 
			
		||||
| 
						 | 
				
			
			@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
 | 
			
		|||
		*vaddr++ = readw(bus_addr);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline unsigned long __indirect_readl(const volatile void __iomem *p)
 | 
			
		||||
static inline u32 __indirect_readl(const volatile void __iomem *p)
 | 
			
		||||
{
 | 
			
		||||
	u32 addr = (__force u32)p;
 | 
			
		||||
	u32 data;
 | 
			
		||||
| 
						 | 
				
			
			@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
 | 
			
		|||
					((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
 | 
			
		||||
 | 
			
		||||
#define	ioread8(p)			ioread8(p)
 | 
			
		||||
static inline unsigned int ioread8(const void __iomem *addr)
 | 
			
		||||
static inline u8 ioread8(const void __iomem *addr)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long port = (unsigned long __force)addr;
 | 
			
		||||
	if (__is_io_address(port))
 | 
			
		||||
| 
						 | 
				
			
			@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
#define	ioread16(p)			ioread16(p)
 | 
			
		||||
static inline unsigned int ioread16(const void __iomem *addr)
 | 
			
		||||
static inline u16 ioread16(const void __iomem *addr)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long port = (unsigned long __force)addr;
 | 
			
		||||
	if (__is_io_address(port))
 | 
			
		||||
| 
						 | 
				
			
			@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
 | 
			
		|||
}
 | 
			
		||||
 | 
			
		||||
#define	ioread32(p)			ioread32(p)
 | 
			
		||||
static inline unsigned int ioread32(const void __iomem *addr)
 | 
			
		||||
static inline u32 ioread32(const void __iomem *addr)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long port = (unsigned long __force)addr;
 | 
			
		||||
	if (__is_io_address(port))
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -65,6 +65,8 @@ config SOC_AM43XX
 | 
			
		|||
	select MACH_OMAP_GENERIC
 | 
			
		||||
	select MIGHT_HAVE_CACHE_L2X0
 | 
			
		||||
	select HAVE_ARM_SCU
 | 
			
		||||
	select GENERIC_CLOCKEVENTS_BROADCAST
 | 
			
		||||
	select HAVE_ARM_TWD
 | 
			
		||||
 | 
			
		||||
config SOC_DRA7XX
 | 
			
		||||
	bool "TI DRA7XX"
 | 
			
		||||
| 
						 | 
				
			
			@ -121,6 +123,7 @@ config ARCH_OMAP2PLUS_TYPICAL
 | 
			
		|||
	select NEON if CPU_V7
 | 
			
		||||
	select PM
 | 
			
		||||
	select REGULATOR
 | 
			
		||||
	select REGULATOR_FIXED_VOLTAGE
 | 
			
		||||
	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 | 
			
		||||
	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
 | 
			
		||||
	select VFP
 | 
			
		||||
| 
						 | 
				
			
			@ -201,7 +204,6 @@ config MACH_OMAP3_PANDORA
 | 
			
		|||
	depends on ARCH_OMAP3
 | 
			
		||||
	default y
 | 
			
		||||
	select OMAP_PACKAGE_CBB
 | 
			
		||||
	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 | 
			
		||||
 | 
			
		||||
config MACH_NOKIA_N810
 | 
			
		||||
       bool
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
 | 
			
		|||
	return r;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
 | 
			
		||||
void tick_broadcast(const struct cpumask *mask)
 | 
			
		||||
{
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
static void __init omap2_gp_clockevent_init(int gptimer_id,
 | 
			
		||||
						const char *fck_source,
 | 
			
		||||
						const char *property)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -889,6 +889,7 @@ static void __init e680_init(void)

	pxa_set_keypad_info(&e680_keypad_platform_data);

	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e680_devices));
}

@@ -956,6 +957,7 @@ static void __init a1200_init(void)

	pxa_set_keypad_info(&a1200_keypad_platform_data);

	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
}

@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
		platform_device_register(&a910_camera);
	}

	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
}

@@ -1215,6 +1218,7 @@ static void __init e6_init(void)

	pxa_set_keypad_info(&e6_keypad_platform_data);

	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e6_devices));
}

@@ -1256,6 +1260,7 @@ static void __init e2_init(void)

	pxa_set_keypad_info(&e2_keypad_platform_data);

	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
	platform_add_devices(ARRAY_AND_SIZE(e2_devices));
}
@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>

static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_12[] = {
	{ .frequency = 75000000,	.driver_data = PLLVAL(0x75, 3, 3),  }, 	/* FVco 600.000000 */
	{ .frequency = 80000000,	.driver_data = PLLVAL(0x98, 4, 3),  }, 	/* FVco 640.000000 */
	{ .frequency = 90000000,	.driver_data = PLLVAL(0x70, 2, 3),  }, 	/* FVco 720.000000 */

@@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>

static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
	{ .frequency = 78019200,	.driver_data = PLLVAL(121, 5, 3), 	}, 	/* FVco 624.153600 */
	{ .frequency = 84067200,	.driver_data = PLLVAL(131, 5, 3), 	}, 	/* FVco 672.537600 */
	{ .frequency = 90115200,	.driver_data = PLLVAL(141, 5, 3), 	}, 	/* FVco 720.921600 */
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)

@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (is_reserved_asid(asid))
			return generation | (asid & ~ASID_MASK);
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use

@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			goto bump_gen;
			return newasid;
	}

	/*

@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)

	__set_bit(asid, asid_map);
	cur_idx = asid;

bump_gen:
	asid |= generation;
	cpumask_clear(mm_cpumask(mm));
	return asid;
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>

@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
				  pmdval_t prot, struct mm_struct *mm)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE

@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
	return !!(get_cr() & CR_XP);
}

#define set_section_perms(perms, field)	{				\
	size_t i;							\
	unsigned long addr;						\
									\
	if (!arch_has_strict_perms())					\
		return;							\
									\
	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
				perms[i].start, perms[i].end,		\
				SECTION_SIZE);				\
			continue;					\
		}							\
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
			section_update(addr, perms[i].mask,		\
				       perms[i].field);			\
	}								\
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
				perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}

}

static inline void fix_kernmem_perms(void)
static void update_sections_early(struct section_perm perms[], int n)
{
	set_section_perms(nx_perms, prot);
	struct task_struct *t, *s;

	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	read_unlock(&tasklist_lock);
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

#ifdef CONFIG_DEBUG_RODATA
int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	set_section_perms(ro_perms, prot);
	stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, clear);
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, prot);
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}
#endif /* CONFIG_DEBUG_RODATA */
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ	cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
	stmfd	sp!, {r4 - r10, lr}
	stmfd	sp!, {r4 - r11, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
	stmia	r0!, {r4 - r5}

@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
	stmia	r0, {r5 - r11}
	ldmfd	sp!, {r4 - r10, pc}
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)

ENTRY(cpu_v7_do_resume)
@@ -269,6 +269,7 @@
			clock-frequency = <0>;	/* Updated by bootloader */
			voltage-ranges = <1800 1800 3300 3300>;
			sdhci,auto-cmd12;
			little-endian;
			bus-width = <4>;
		};

@@ -277,6 +278,7 @@
			reg = <0x0 0x2300000 0x0 0x10000>;
			interrupts = <0 36 0x4>; /* Level high type */
			gpio-controller;
			little-endian;
			#gpio-cells = <2>;
			interrupt-controller;
			#interrupt-cells = <2>;

@@ -287,6 +289,7 @@
			reg = <0x0 0x2310000 0x0 0x10000>;
			interrupts = <0 36 0x4>; /* Level high type */
			gpio-controller;
			little-endian;
			#gpio-cells = <2>;
			interrupt-controller;
			#interrupt-cells = <2>;

@@ -297,6 +300,7 @@
			reg = <0x0 0x2320000 0x0 0x10000>;
			interrupts = <0 37 0x4>; /* Level high type */
			gpio-controller;
			little-endian;
			#gpio-cells = <2>;
			interrupt-controller;
			#interrupt-cells = <2>;

@@ -307,6 +311,7 @@
			reg = <0x0 0x2330000 0x0 0x10000>;
			interrupts = <0 37 0x4>; /* Level high type */
			gpio-controller;
			little-endian;
			#gpio-cells = <2>;
			interrupt-controller;
			#interrupt-cells = <2>;
@@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <asm/barrier.h>

/*
 * Low-level accessors
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep)) {
		BUG_ON(!pte_young(pte));
		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
	    pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
@@ -5,6 +5,7 @@
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>

@@ -140,7 +141,7 @@ SECTIONS
		ARM_EXIT_KEEP(EXIT_DATA)
	}

	PERCPU_SECTION(64)
	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(PAGE_SIZE);
	__init_end = .;

@@ -158,7 +159,7 @@ SECTIONS
	. = ALIGN(PAGE_SIZE);
	_data = .;
	_sdata = .;
	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	PECOFF_EDATA_PADDING
	_edata = .;
@@ -14,7 +14,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
@@ -11,7 +11,7 @@

#define NR_syscalls			322 /* length of syscall table */
#define NR_syscalls			323 /* length of syscall table */

/*
 * The following defines stop scripts/checksyscalls.sh from complaining about

@@ -335,5 +335,6 @@
#define __NR_userfaultfd		1343
#define __NR_membarrier			1344
#define __NR_kcmp			1345
#define __NR_mlock2			1346

#endif /* _UAPI_ASM_IA64_UNISTD_H */

@@ -1771,5 +1771,6 @@ sys_call_table:
	data8 sys_userfaultfd
	data8 sys_membarrier
	data8 sys_kcmp				// 1345
	data8 sys_mlock2

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_sync(sg_phys(sg), sg->length, direction);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
							sg->length, direction);
	}

	return nents;
@@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one

@@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\

@@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})

@@ -1229,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}

@@ -1384,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
@@ -257,7 +257,6 @@ LEAF(mips_cps_core_init)
	has_mt	t0, 3f

	.set	push
	.set	mips64r2
	.set	mt

	/* Only allow 1 TC per VPE to execute... */

@@ -376,7 +375,6 @@ LEAF(mips_cps_boot_vpes)
	 nop

	.set	push
	.set	mips64r2
	.set	mt

1:	/* Enter VPE configuration state */
@@ -17,6 +17,7 @@
#include <asm/fpu.h>
#include <asm/msa.h>

extern void *__bzero_kernel(void *__s, size_t __count);
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_kernel_nocheck_asm(char *__to,
					      const char *__from, long __len);

@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
EXPORT_SYMBOL(__copy_in_user_eva);
EXPORT_SYMBOL(__copy_to_user_eva);
EXPORT_SYMBOL(__copy_user_inatomic_eva);
EXPORT_SYMBOL(__bzero_kernel);
#endif
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
@@ -283,6 +283,8 @@ LEAF(memset)
1:
#ifndef CONFIG_EVA
FEXPORT(__bzero)
#else
FEXPORT(__bzero_kernel)
#endif
	__BUILD_BZERO LEGACY_MODE
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
@@ -221,7 +221,6 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int rt288x_pci_probe(struct platform_device *pdev)
{
	void __iomem *io_map_base;
	int i;

	rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
@@ -39,7 +39,6 @@ extern void msp_serial_setup(void);
void msp7120_reset(void)
{
	void *start, *end, *iptr;
	register int i;

	/* Diasble all interrupts */
	local_irq_disable();
@@ -26,7 +26,7 @@ static inline void kb_wait(void)
/* XXX This ends up at the ARC firmware prompt ...  */
void sni_machine_restart(char *command)
{
	int i, j;
	int i;

	/* This does a normal via the keyboard controller like a PC.
	   We can do that easier ...  */
@@ -26,8 +26,8 @@ aflags-vdso := $(ccflags-vdso) \
# the comments on that file.
#
ifndef CONFIG_CPU_MIPSR6
  ifeq ($(call ld-ifversion, -gt, 22400000, y),)
    $(warning MIPS VDSO requires binutils > 2.24)
  ifeq ($(call ld-ifversion, -lt, 22500000, y),)
    $(warning MIPS VDSO requires binutils >= 2.25)
    obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
    ccflags-vdso += -DDISABLE_MIPS_VDSO
  endif
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
@@ -360,8 +360,9 @@
#define __NR_execveat		(__NR_Linux + 342)
#define __NR_membarrier		(__NR_Linux + 343)
#define __NR_userfaultfd	(__NR_Linux + 344)
#define __NR_mlock2		(__NR_Linux + 345)

#define __NR_Linux_syscalls	(__NR_userfaultfd + 1)
#define __NR_Linux_syscalls	(__NR_mlock2 + 1)

#define __IGNORE_select		/* newselect */
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}

void __init pcibios_init_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	unsigned short bridge_ctl;

	/* We deal only with pci controllers and pci-pci bridges. */
	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	/* PCI-PCI bridge - set the cache line and default latency
	   (32) for primary and secondary buses. */
	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}

/*
 * pcibios align resources() is called every time generic PCI code
 * wants to generate a new address. The process of looking for
@@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
		regs->gr[28]);
}

/*
 * Check how the syscall number gets loaded into %r20 within
 * the delay branch in userspace and adjust as needed.
 */

static void check_syscallno_in_delay_branch(struct pt_regs *regs)
{
	u32 opcode, source_reg;
	u32 __user *uaddr;
	int err;

	/* Usually we don't have to restore %r20 (the system call number)
	 * because it gets loaded in the delay slot of the branch external
	 * instruction via the ldi instruction.
	 * In some cases a register-to-register copy instruction might have
	 * been used instead, in which case we need to copy the syscall
	 * number into the source register before returning to userspace.
	 */

	/* A syscall is just a branch, so all we have to do is fiddle the
	 * return pointer so that the ble instruction gets executed again.
	 */
	regs->gr[31] -= 8; /* delayed branching */

	/* Get assembler opcode of code in delay branch */
	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
	err = get_user(opcode, uaddr);
	if (err)
		return;

	/* Check if delay branch uses "ldi int,%r20" */
	if ((opcode & 0xffff0000) == 0x34140000)
		return;	/* everything ok, just return */

	/* Check if delay branch uses "nop" */
	if (opcode == INSN_NOP)
		return;

	/* Check if delay branch uses "copy %rX,%r20" */
	if ((opcode & 0xffe0ffff) == 0x08000254) {
		source_reg = (opcode >> 16) & 31;
		regs->gr[source_reg] = regs->gr[20];
		return;
	}

	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
		current->comm, task_pid_nr(current), opcode);
}

static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{

@@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
		}
		/* fallthrough */
	case -ERESTARTNOINTR:
		/* A syscall is just a branch, so all
		 * we have to do is fiddle the return pointer.
		 */
		regs->gr[31] -= 8; /* delayed branching */
		check_syscallno_in_delay_branch(regs);
		break;
	}
}

@@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
	}
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR: {
		/* Hooray for delayed branching.  We don't
		 * have to restore %r20 (the system call
		 * number) because it gets loaded in the delay
		 * slot of the branch external instruction.
		 */
		regs->gr[31] -= 8;
	case -ERESTARTNOINTR:
		check_syscallno_in_delay_branch(regs);
		return;
	}
	default:
		break;
	}
@@ -440,6 +440,7 @@
	ENTRY_COMP(execveat)
	ENTRY_SAME(membarrier)
	ENTRY_SAME(userfaultfd)
	ENTRY_SAME(mlock2)		/* 345 */

.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
@@ -227,23 +227,15 @@
				reg = <0x520 0x20>;

				phy0: ethernet-phy@1f {
					interrupt-parent = <&mpic>;
					interrupts = <10 1>;
					reg = <0x1f>;
				};
				phy1: ethernet-phy@0 {
					interrupt-parent = <&mpic>;
					interrupts = <10 1>;
					reg = <0>;
				};
				phy2: ethernet-phy@1 {
					interrupt-parent = <&mpic>;
					interrupts = <10 1>;
					reg = <1>;
				};
				phy3: ethernet-phy@2 {
					interrupt-parent = <&mpic>;
					interrupts = <10 1>;
					reg = <2>;
				};
				tbi0: tbi-phy@11 {
@@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
SYSCALL_SPU(userfaultfd)
SYSCALL_SPU(membarrier)
SYSCALL(semop)
SYSCALL(semget)
COMPAT_SYS(semctl)
COMPAT_SYS(semtimedop)
COMPAT_SYS(msgsnd)
COMPAT_SYS(msgrcv)
SYSCALL(msgget)
COMPAT_SYS(msgctl)
COMPAT_SYS(shmat)
SYSCALL(shmdt)
SYSCALL(shmget)
COMPAT_SYS(shmctl)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(mlock2)
@@ -388,18 +388,6 @@
#define __NR_switch_endian	363
#define __NR_userfaultfd	364
#define __NR_membarrier		365
#define __NR_semop		366
#define __NR_semget		367
#define __NR_semctl		368
#define __NR_semtimedop		369
#define __NR_msgsnd		370
#define __NR_msgrcv		371
#define __NR_msgget		372
#define __NR_msgctl		373
#define __NR_shmat		374
#define __NR_shmdt		375
#define __NR_shmget		376
#define __NR_shmctl		377
#define __NR_mlock2		378

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/*
	 * If it's PHB PE, the frozen state on all available PEs should have
	 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
	 * child PEs because they might be in frozen state.
	 */
	if (!(pe->type & EEH_PE_PHB)) {
		rc = eeh_clear_pe_frozen_state(pe, false);
		if (rc)
			return rc;
	}
	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc)
		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;

static void opal_handle_irq_work(struct irq_work *work);
static __be64 last_outstanding_events;
static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
	.func = opal_handle_irq_work,
};

void opal_handle_events(uint64_t events)
{
	int virq, hwirq = 0;
	u64 mask = opal_event_irqchip.mask;

	if (!in_irq() && (events & mask)) {
		last_outstanding_events = events;
		irq_work_queue(&opal_event_irq_work);
		return;
	}

	while (events & mask) {
		hwirq = fls64(events) - 1;
		if (BIT_ULL(hwirq) & mask) {
			virq = irq_find_mapping(opal_event_irqchip.domain,
						hwirq);
			if (virq)
				generic_handle_irq(virq);
		}
		events &= ~BIT_ULL(hwirq);
	}
}

static void opal_event_mask(struct irq_data *d)
{
	clear_bit(d->hwirq, &opal_event_irqchip.mask);

@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)

static void opal_event_unmask(struct irq_data *d)
{
	__be64 events;

	set_bit(d->hwirq, &opal_event_irqchip.mask);

	opal_poll_events(&last_outstanding_events);
	opal_poll_events(&events);
	last_outstanding_events = be64_to_cpu(events);

	/*
	 * We can't just handle the events now with opal_handle_events().
	 * If we did we would deadlock when opal_event_unmask() is called from
	 * handle_level_irq() with the irq descriptor lock held, because
	 * calling opal_handle_events() would call generic_handle_irq() and
	 * then handle_level_irq() which would try to take the descriptor lock
	 * again. Instead queue the events for later.
	 */
	if (last_outstanding_events & opal_event_irqchip.mask)
		/* Need to retrigger the interrupt */
		irq_work_queue(&opal_event_irq_work);

@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
	return 0;
}

void opal_handle_events(uint64_t events)
{
	int virq, hwirq = 0;
	u64 mask = opal_event_irqchip.mask;

	if (!in_irq() && (events & mask)) {
		last_outstanding_events = events;
		irq_work_queue(&opal_event_irq_work);
		return;
	}

	while (events & mask) {
		hwirq = fls64(events) - 1;
		if (BIT_ULL(hwirq) & mask) {
			virq = irq_find_mapping(opal_event_irqchip.domain,
						hwirq);
			if (virq)
				generic_handle_irq(virq);
		}
		events &= ~BIT_ULL(hwirq);
	}
}

static irqreturn_t opal_interrupt(int irq, void *data)
{
	__be64 events;

@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)

static void opal_handle_irq_work(struct irq_work *work)
{
	opal_handle_events(be64_to_cpu(last_outstanding_events));
	opal_handle_events(last_outstanding_events);
}

static int opal_event_match(struct irq_domain *h, struct device_node *node,
@@ -278,7 +278,7 @@ static void opal_handle_message(void)

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
		pr_warning("%s: Unknown message type: %u\n", __func__, type);
		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
			}
			if (separator)
				ptr += sprintf(ptr, "%c", separator);
			/*
			 * Use four '%' characters below because of the
			 * following two conversions:
			 *
			 *  1) sprintf: %%%%r -> %%r
			 *  2) printk : %%r   -> %r
			 */
			if (operand->flags & OPERAND_GPR)
				ptr += sprintf(ptr, "%%r%i", value);
				ptr += sprintf(ptr, "%%%%r%i", value);
			else if (operand->flags & OPERAND_FPR)
				ptr += sprintf(ptr, "%%f%i", value);
				ptr += sprintf(ptr, "%%%%f%i", value);
			else if (operand->flags & OPERAND_AR)
				ptr += sprintf(ptr, "%%a%i", value);
				ptr += sprintf(ptr, "%%%%a%i", value);
			else if (operand->flags & OPERAND_CR)
				ptr += sprintf(ptr, "%%c%i", value);
				ptr += sprintf(ptr, "%%%%c%i", value);
			else if (operand->flags & OPERAND_VR)
				ptr += sprintf(ptr, "%%v%i", value);
				ptr += sprintf(ptr, "%%%%v%i", value);
			else if (operand->flags & OPERAND_PCREL)
				ptr += sprintf(ptr, "%lx", (signed int) value
								      + addr);
@@ -278,7 +278,7 @@
#define __NR_fsetxattr		256
#define __NR_getxattr		257
#define __NR_lgetxattr		258
#define __NR_fgetxattr		269
#define __NR_fgetxattr		259
#define __NR_listxattr		260
#define __NR_llistxattr		261
#define __NR_flistxattr		262
@@ -10,7 +10,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
			
			@ -95,6 +95,7 @@
 | 
			
		|||
 * really available.  So we simply advertise only "crypto" support.
 | 
			
		||||
 */
 | 
			
		||||
#define HWCAP_SPARC_CRYPTO	0x04000000 /* CRYPTO insns available */
 | 
			
		||||
#define HWCAP_SPARC_ADI		0x08000000 /* ADI available */
 | 
			
		||||
 | 
			
		||||
#define CORE_DUMP_USE_REGSET
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -417,8 +417,9 @@
#define __NR_bpf		349
#define __NR_execveat		350
#define __NR_membarrier		351
#define __NR_userfaultfd	352

#define NR_syscalls		352
#define NR_syscalls		353

/* Bitmask values returned from kern_features system call.  */
#define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
@@ -946,6 +946,12 @@ ENTRY(__retl_one)
	 mov	1, %o0
ENDPROC(__retl_one)

ENTRY(__retl_one_fp)
	VISExitHalf
	retl
	 mov	1, %o0
ENDPROC(__retl_one_fp)

ENTRY(__ret_one_asi)
	wr	%g0, ASI_AIUS, %asi
	ret

@@ -958,6 +964,13 @@ ENTRY(__retl_one_asi)
	 mov	1, %o0
ENDPROC(__retl_one_asi)

ENTRY(__retl_one_asi_fp)
	wr	%g0, ASI_AIUS, %asi
	VISExitHalf
	retl
	 mov	1, %o0
ENDPROC(__retl_one_asi_fp)

ENTRY(__retl_o1)
	retl
	 mov	%o1, %o0
@@ -9,7 +9,7 @@
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/perf_event.h>

@@ -1828,11 +1828,18 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	u64 saved_fault_address = current_thread_info()->fault_address;
	u8 saved_fault_code = get_thread_fault_code();
	mm_segment_t old_fs;

	perf_callchain_store(entry, regs->tpc);

	if (!current->mm)
		return;

	old_fs = get_fs();
	set_fs(USER_DS);

	flushw_user();

	pagefault_disable();

@@ -1843,4 +1850,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
		perf_callchain_user_64(entry, regs);

	pagefault_enable();

	set_fs(old_fs);
	set_thread_fault_code(saved_fault_code);
	current_thread_info()->fault_address = saved_fault_address;
}
@@ -73,7 +73,13 @@ rtrap_nmi:	ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
		andn			%l1, %l4, %l1
		srl			%l4, 20, %l4
		ba,pt			%xcc, rtrap_no_irq_enable
		 wrpr			%l4, %pil
		nop
		/* Do not actually set the %pil here.  We will do that
		 * below after we clear PSTATE_IE in the %pstate register.
		 * If we re-enable interrupts here, we can recurse down
		 * the hardirq stack potentially endlessly, causing a
		 * stack overflow.
		 */

		.align			64
		.globl			rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
@@ -380,7 +380,8 @@ static const char *hwcaps[] = {
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond",
	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
	"adp",
};

static const char *crypto_hwcaps[] = {

@@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m)
	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (caps & bit) {
		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;

@@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps)

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (caps & bit)
		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)

@@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (!strcmp(prop, hwcaps[i])) {
			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
@@ -87,4 +87,4 @@ sys_call_table:
/*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/	.long sys_execveat, sys_membarrier
/*350*/	.long sys_execveat, sys_membarrier, sys_userfaultfd

@@ -88,7 +88,7 @@ sys_call_table32:
	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
	.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/	.word sys32_execveat, sys_membarrier
/*350*/	.word sys32_execveat, sys_membarrier, sys_userfaultfd

#endif /* CONFIG_COMPAT */

@@ -168,4 +168,4 @@ sys_call_table:
	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/	.word sys64_execveat, sys_membarrier
/*350*/	.word sys64_execveat, sys_membarrier, sys_userfaultfd
Some files were not shown because too many files have changed in this diff.