Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three sets of overlapping changes.  Nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit b50afd203a
David S. Miller <davem@davemloft.net>, 2016-10-02 21:17:07 -04:00
141 changed files with 899 additions and 605 deletions

@@ -69,6 +69,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>

@@ -13,6 +13,7 @@ Required properties:
 - touchscreen-size-y      : See touchscreen.txt

 Optional properties:
+- firmware-name           : File basename (string) for board specific firmware
 - touchscreen-inverted-x  : See touchscreen.txt
 - touchscreen-inverted-y  : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt

@@ -8753,7 +8753,7 @@ F: drivers/oprofile/
 F: include/linux/oprofile.h

 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M: Mark Fasheh <mfasheh@suse.com>
+M: Mark Fasheh <mfasheh@versity.com>
 M: Joel Becker <jlbec@evilplan.org>
 L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W: http://ocfs2.wiki.kernel.org
@@ -11641,7 +11641,7 @@ F: Documentation/devicetree/bindings/thermal/
 THERMAL/CPU_COOLING
 M: Amit Daniel Kachhap <amit.kachhap@gmail.com>
 M: Viresh Kumar <viresh.kumar@linaro.org>
-M: Javi Merino <javi.merino@arm.com>
+M: Javi Merino <javi.merino@kernel.org>
 L: linux-pm@vger.kernel.org
 S: Supported
 F: Documentation/thermal/cpu-cooling-api.txt

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep

 # *DOCUMENTATION*

@@ -779,7 +779,7 @@ __armv7_mmu_cache_on:
         orrne   r0, r0, #1              @ MMU enabled
         movne   r1, #0xfffffffd         @ domain 0 = client
         bic     r6, r6, #1 << 31        @ 32-bit translation system
-        bic     r6, r6, #3 << 0         @ use only ttbr0
+        bic     r6, r6, #(7 << 0) | (1 << 4)    @ use only ttbr0
         mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
         mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
         mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
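Note: the widened mask matters if one assumes the VMSAv7 TTBCR layout (N in bits [2:0], PD0 at bit 4 — an assumption, not stated in the hunk): clearing only `#3 << 0` could leave N's top bit or PD0 set by firmware, so translation table walks would not use TTBR0. A standalone sketch of the mask arithmetic, not kernel code:

    /* sketch only: VMSAv7 TTBCR field layout assumed */
    #include <stdio.h>

    int main(void)
    {
        unsigned int ttbcr = 0x15;              /* hypothetical firmware value */

        ttbcr &= ~(1u << 31);                   /* EAE=0: 32-bit translation */
        ttbcr &= ~((7u << 0) | (1u << 4));      /* N=0, PD0=0: walk TTBR0 */

        printf("ttbcr = %#x\n", ttbcr);         /* 0x15 -> 0x0 */
        return 0;
    }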

@@ -111,7 +111,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 /* The ARM override for dma_max_pfn() */
 static inline unsigned long dma_max_pfn(struct device *dev)
 {
-    return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+    return dma_to_pfn(dev, *dev->dma_mask);
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)

@@ -88,6 +88,8 @@ void __init arm_dt_init_cpu_maps(void)
         return;

     for_each_child_of_node(cpus, cpu) {
+        const __be32 *cell;
+        int prop_bytes;
         u32 hwid;

         if (of_node_cmp(cpu->type, "cpu"))
@@ -99,7 +101,8 @@ void __init arm_dt_init_cpu_maps(void)
          * properties is considered invalid to build the
          * cpu_logical_map.
          */
-        if (of_property_read_u32(cpu, "reg", &hwid)) {
+        cell = of_get_property(cpu, "reg", &prop_bytes);
+        if (!cell || prop_bytes < sizeof(*cell)) {
             pr_debug(" * %s missing reg property\n",
                      cpu->full_name);
             of_node_put(cpu);
@@ -107,10 +110,15 @@ void __init arm_dt_init_cpu_maps(void)
         }

         /*
-         * 8 MSBs must be set to 0 in the DT since the reg property
+         * Bits n:24 must be set to 0 in the DT since the reg property
          * defines the MPIDR[23:0].
          */
-        if (hwid & ~MPIDR_HWID_BITMASK) {
+        do {
+            hwid = be32_to_cpu(*cell++);
+            prop_bytes -= sizeof(*cell);
+        } while (!hwid && prop_bytes > 0);
+
+        if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
             of_node_put(cpu);
             return;
         }
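Note: the loop makes the parser tolerate a `reg` property wider than one cell (e.g. `#address-cells = <2>`) by consuming leading zero cells and rejecting anything left over. A user-space sketch of the same folding, with a hypothetical two-cell property:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>    /* ntohl() stands in for be32_to_cpu() */

    int main(void)
    {
        /* hypothetical DT reg property <0x0 0x101>, stored big-endian */
        uint32_t prop[] = { htonl(0x0), htonl(0x101) };
        const uint32_t *cell = prop;
        int prop_bytes = sizeof(prop);
        uint32_t hwid;

        do {
            hwid = ntohl(*cell++);
            prop_bytes -= sizeof(*cell);
        } while (!hwid && prop_bytes > 0);

        /* leftover bytes or bits above [23:0] mean an invalid entry */
        printf("hwid = %#x, leftover bytes = %d\n", hwid, prop_bytes);
        return 0;
    }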

@@ -61,8 +61,6 @@
 #define AARCH64_BREAK_KGDB_DYN_DBG  \
     (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))

-#define KGDB_DYN_BRK_INS_BYTE(x) \
-    ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
-
 #define CACHE_FLUSH_IS_SAFE 1

@@ -19,10 +19,13 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/bug.h>
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/traps.h>

 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
     unregister_die_notifier(&kgdb_notifier);
 }

-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
-    .gdb_bpt_instr = {
-        KGDB_DYN_BRK_INS_BYTE(0),
-        KGDB_DYN_BRK_INS_BYTE(1),
-        KGDB_DYN_BRK_INS_BYTE(2),
-        KGDB_DYN_BRK_INS_BYTE(3),
-    }
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+    int err;
+
+    BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+    err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+    if (err)
+        return err;
+
+    return aarch64_insn_write((void *)bpt->bpt_addr,
+            (u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+    return aarch64_insn_write((void *)bpt->bpt_addr,
+            *(u32 *)bpt->saved_instr);
+}

@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
     return ret;
 }

-static void smp_store_cpu_info(unsigned int cpuid)
-{
-    store_cpu_topology(cpuid);
-    numa_store_cpu_info(cpuid);
-}
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
      */
     notify_cpu_starting(cpu);

-    smp_store_cpu_info(cpu);
+    store_cpu_topology(cpu);

     /*
      * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
     int err;
     unsigned int cpu;
+    unsigned int this_cpu;

     init_cpu_topology();

-    smp_store_cpu_info(smp_processor_id());
+    this_cpu = smp_processor_id();
+    store_cpu_topology(this_cpu);
+    numa_store_cpu_info(this_cpu);

     /*
      * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
             continue;

         set_cpu_present(cpu, true);
+        numa_store_cpu_info(cpu);
     }
 }

@@ -65,6 +65,7 @@ config MIPS
     select ARCH_CLOCKSOURCE_DATA
     select HANDLE_DOMAIN_IRQ
     select HAVE_EXIT_THREAD
+    select HAVE_REGS_AND_STACK_ACCESS_API

 menu "Machine selection"

@@ -113,42 +113,6 @@ config SPINLOCK_TEST
     help
       Add several files to the debugfs to test spinlock speed.

-if CPU_MIPSR6
-
-choice
-    prompt "Compact branch policy"
-    default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-    bool "Never (force delay slot branches)"
-    help
-      Pass the -mcompact-branches=never flag to the compiler in order to
-      force it to always emit branches with delay slots, and make no use
-      of the compact branch instructions introduced by MIPSr6. This is
-      useful if you suspect there may be an issue with compact branches in
-      either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-    bool "Optimal (use where beneficial)"
-    help
-      Pass the -mcompact-branches=optimal flag to the compiler in order for
-      it to make use of compact branch instructions where it deems them
-      beneficial, and use branches with delay slots elsewhere. This is the
-      default compiler behaviour, and should be used unless you have a
-      reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-    bool "Always (force compact branches)"
-    help
-      Pass the -mcompact-branches=always flag to the compiler in order to
-      force it to always emit compact branches, making no use of branch
-      instructions with delay slots. This can result in more compact code
-      which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
     bool "L2 cache debugfs entries"
     depends on DEBUG_FS

@@ -203,10 +203,6 @@ endif
 toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT

-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER)   += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS)  += -mcompact-branches=always
-
 #
 # Firmware support
 #

@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
     struct clk *clk;

     clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-    if (!clk)
+    if (IS_ERR(clk))
         panic("failed to allocate %s clock structure", name);

     return clk;

@@ -1619,6 +1619,12 @@ static int __init octeon_irq_init_gpio(
         return -ENOMEM;
     }

+    /*
+     * Clear the OF_POPULATED flag that was set by of_irq_init()
+     * so that all GPIO devices will be probed.
+     */
+    of_node_clear_flag(gpio_node, OF_POPULATED);
+
     return 0;
 }

 /*

@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
     return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);

 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");

@@ -146,7 +146,25 @@
         /*
          * Find irq with highest priority
          */
-        PTR_LA  t1,cpu_mask_nr_tbl
+        # open coded PTR_LA t1, cpu_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+        # open coded la t1, cpu_mask_nr_tbl
+        lui     t1, %hi(cpu_mask_nr_tbl)
+        addiu   t1, %lo(cpu_mask_nr_tbl)
+#endif
+#if (_MIPS_SZPTR == 64)
+        # open coded dla t1, cpu_mask_nr_tbl
+        .set    push
+        .set    noat
+        lui     t1, %highest(cpu_mask_nr_tbl)
+        lui     AT, %hi(cpu_mask_nr_tbl)
+        daddiu  t1, t1, %higher(cpu_mask_nr_tbl)
+        daddiu  AT, AT, %lo(cpu_mask_nr_tbl)
+        dsll    t1, 32
+        daddu   t1, t1, AT
+        .set    pop
+#endif
 1:      lw      t2,(t1)
         nop
         and     t2,t0
@@ -195,7 +213,25 @@
         /*
          * Find irq with highest priority
          */
-        PTR_LA  t1,asic_mask_nr_tbl
+        # open coded PTR_LA t1, asic_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+        # open coded la t1, asic_mask_nr_tbl
+        lui     t1, %hi(asic_mask_nr_tbl)
+        addiu   t1, %lo(asic_mask_nr_tbl)
+#endif
+#if (_MIPS_SZPTR == 64)
+        # open coded dla t1, asic_mask_nr_tbl
+        .set    push
+        .set    noat
+        lui     t1, %highest(asic_mask_nr_tbl)
+        lui     AT, %hi(asic_mask_nr_tbl)
+        daddiu  t1, t1, %higher(asic_mask_nr_tbl)
+        daddiu  AT, AT, %lo(asic_mask_nr_tbl)
+        dsll    t1, 32
+        daddu   t1, t1, AT
+        .set    pop
+#endif
 2:      lw      t2,(t1)
         nop
         and     t2,t0
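Note: the four relocations carve a 64-bit symbol address into 16-bit pieces that the lui/daddiu/dsll/daddu sequence reassembles. A plain C sketch of the recombination, with pieces chosen so no carries occur (the real %hi-style relocations also fold in rounding to survive the sign-extending adds):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0x0000012345671234ull;  /* hypothetical symbol */
        uint16_t highest = addr >> 48, higher = addr >> 32;
        uint16_t hi = addr >> 16, lo = addr;

        uint64_t t1 = (uint64_t)highest << 16;  /* lui    t1, %highest */
        uint64_t at = (uint64_t)hi << 16;       /* lui    AT, %hi      */
        t1 += (int16_t)higher;                  /* daddiu t1, %higher  */
        at += (int16_t)lo;                      /* daddiu AT, %lo      */
        t1 <<= 32;                              /* dsll   t1, 32       */
        t1 += at;                               /* daddu  t1, t1, AT   */

        printf("%d\n", t1 == addr);             /* prints 1 */
        return 0;
    }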

@@ -157,6 +157,7 @@
     ldc1    $f28, THREAD_FPR28(\thread)
     ldc1    $f30, THREAD_FPR30(\thread)
     ctc1    \tmp, fcr31
+    .set    pop
     .endm

     .macro  fpu_restore_16odd thread

@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
     extern const bool octeon_should_swizzle_table[];
-    unsigned long did = ((unsigned long)a >> 40) & 0xff;
+    u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;

     return octeon_should_swizzle_table[did];
 }
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)

 #define __should_swizzle_bits(a)    false

-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
     /* boot bus? */
     return ((p >> 40) & 0xff) == 0;
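Note: the switch from `unsigned long` to `u64` matters on 32-bit builds, where `unsigned long` is 32 bits wide and a right shift by 40 can never reach the device-ID bits; widening first preserves bits 47:40 of the address. Sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x8001180000000000ull;   /* hypothetical 64-bit address */

        /* (uint32_t)a >> 40 would be 0 (or undefined); widen first */
        unsigned did = (unsigned)((a >> 40) & 0xff);

        printf("did = %#x\n", did);           /* 0x18 */
        return 0;
    }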

@@ -11,11 +11,13 @@
 #define CP0_EBASE $15, 1

     .macro  kernel_entry_setup
+#ifdef CONFIG_SMP
     mfc0    t0, CP0_EBASE
     andi    t0, t0, 0x3ff       # CPUNum
     beqz    t0, 1f
     # CPUs other than zero goto smp_bootstrap
     j       smp_bootstrap
+#endif /* CONFIG_SMP */

 1:
     .endm

@@ -458,10 +458,21 @@ static inline int mips_cm_revision(void)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
     extern int smp_num_siblings;
+    uint32_t cfg;

     if (mips_cm_revision() >= CM_REV_CM3)
         return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;

+    if (mips_cm_present()) {
+        /*
+         * We presume that all cores in the system will have the same
+         * number of VP(E)s, and if that ever changes then this will
+         * need revisiting.
+         */
+        cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+        return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+    }
+
     if (IS_ENABLED(CONFIG_SMP))
         return smp_num_siblings;
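Note: the `+ 1` implies the PVPE field holds the number of VP(E)s minus one. A sketch of the field extraction, with hypothetical mask/shift values (the real CM_GCR_Cx_CONFIG_PVPE_* definitions are not shown in the hunk):

    #include <stdio.h>
    #include <stdint.h>

    #define PVPE_MSK 0x3ff   /* hypothetical field mask */
    #define PVPE_SHF 0       /* hypothetical field shift */

    int main(void)
    {
        uint32_t gcr_cl_config = 0x3;   /* hypothetical register value */
        unsigned vps = ((gcr_cl_config & PVPE_MSK) >> PVPE_SHF) + 1;

        printf("%u VP(E)s per core\n", vps);   /* prints 4 */
        return 0;
    }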

@@ -660,8 +660,6 @@
 #define MIPS_CONF7_IAR      (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR       (_ULCAST_(1) << 16)

-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT  (18)
-
 /* WatchLo* register definitions */
 #define MIPS_WATCHLO_IRW    (_ULCAST_(0x7) << 0)

@@ -36,7 +36,6 @@ struct arch_uprobe {
     unsigned long   resume_epc;
     u32 insn[2];
     u32 ixol[2];
-    union mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
 };

 struct arch_uprobe_task {

@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;

-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+    FTLB_EN       = 1 << 0,
+    FTLB_SET_PROB = 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);

 static int __init ftlb_disable(char *s)
 {
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
         return 1;
     }

-    back_to_back_c0_hazard();
-
     config4 = read_c0_config4();

     /* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
         return 3;
 }

-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
 {
     unsigned int config;

@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
     case CPU_P6600:
         /* proAptiv & related cores use Config6 to enable the FTLB */
         config = read_c0_config6();
-        /* Clear the old probability value */
-        config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
-        if (enable)
-            /* Enable FTLB */
-            write_c0_config6(config |
-                     (calculate_ftlb_probability(c)
-                      << MIPS_CONF6_FTLBP_SHIFT)
-                     | MIPS_CONF6_FTLBEN);
+
+        if (flags & FTLB_EN)
+            config |= MIPS_CONF6_FTLBEN;
         else
-            /* Disable FTLB */
-            write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
+            config &= ~MIPS_CONF6_FTLBEN;
+
+        if (flags & FTLB_SET_PROB) {
+            config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+            config |= calculate_ftlb_probability(c)
+                  << MIPS_CONF6_FTLBP_SHIFT;
+        }
+
+        write_c0_config6(config);
+        back_to_back_c0_hazard();
         break;
     case CPU_I6400:
-        /* I6400 & related cores use Config7 to configure FTLB */
-        config = read_c0_config7();
-        /* Clear the old probability value */
-        config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
-        write_c0_config7(config | (calculate_ftlb_probability(c)
-                       << MIPS_CONF7_FTLBP_SHIFT));
-        break;
+        /* There's no way to disable the FTLB */
+        if (!(flags & FTLB_EN))
+            return 1;
+        return 0;
     case CPU_LOONGSON3:
         /* Flush ITLB, DTLB, VTLB and FTLB */
         write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
                   LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
         /* Loongson-3 cores use Config6 to enable the FTLB */
         config = read_c0_config6();
-        if (enable)
+        if (flags & FTLB_EN)
             /* Enable FTLB */
             write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
         else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
                        PAGE_SIZE, config4);
             /* Switch FTLB off */
             set_ftlb_enable(c, 0);
+            mips_ftlb_disabled = 1;
             break;
         }
         c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
     c->scache.flags = MIPS_CACHE_NOT_PRESENT;

     /* Enable FTLB if present and not disabled */
-    set_ftlb_enable(c, !mips_ftlb_disabled);
+    set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);

     ok = decode_config0(c);     /* Read Config registers.  */
     BUG_ON(!ok);                /* Arch spec violation!  */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
         }
     }

+    /* configure the FTLB write probability */
+    set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
     mips_probe_watch_registers(c);

 #ifndef CONFIG_MIPS_CPS

@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
     PTR_LA  k1, __r4k_wait
     ori     k0, 0x1f    /* 32 byte rollback region */
     xori    k0, 0x1f
-    bne     k0, k1, 9f
+    bne     k0, k1, \handler
     MTC0    k0, CP0_EPC
-9:
     .set    pop
     .endm

@@ -1164,7 +1164,9 @@ fpu_emul:
         regs->regs[31] = r31;
         regs->cp0_epc = epc;
         if (!used_math()) { /* First time FPU user. */
+            preempt_disable();
             err = init_fpu();
+            preempt_enable();
             set_used_math();
         }
         lose_fpu(1);    /* Save FPU state for the emulator. */

@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
         return -EOPNOTSUPP;

     /* Avoid inadvertently triggering emulation */
-    if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-        !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+    if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+        !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
         return -EOPNOTSUPP;
-    if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+    if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
         return -EOPNOTSUPP;

     /* FR = 0 not supported in MIPS R6 */
-    if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+    if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
         return -EOPNOTSUPP;

     /* Proceed with the mode switch */

@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
     int x = boot_mem_map.nr_map;
     int i;

+    /*
+     * If the region reaches the top of the physical address space, adjust
+     * the size slightly so that (start + size) doesn't overflow
+     */
+    if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+        --size;
+
     /* Sanity check */
     if (start + size < start) {
         pr_warn("Trying to add an invalid memory region, skipped\n");
@@ -757,7 +764,6 @@ static void __init arch_mem_init(char **cmdline_p)
     device_tree_init();
     sparse_init();
     plat_swiotlb_setup();
-    paging_init();

     dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
     /* Tell bootmem about cma reserved memblock section */
@@ -870,6 +876,7 @@ void __init setup_arch(char **cmdline_p)

     prefill_possible_map();
     cpu_cache_init();
+    paging_init();
 }

 unsigned long kernelsp[NR_CPUS];
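Note on the add_memory_region() guard above: when a region ends exactly at the top of the physical address space, `start + size` wraps to zero and would trip the `start + size < start` sanity check even though the region is valid; shaving one byte avoids the wrap. Worked example with a 32-bit phys_addr_t:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical region: 256MB at 0xf0000000, 32-bit phys_addr_t */
        uint32_t start = 0xf0000000u, size = 0x10000000u;

        if (start + size - 1 == UINT32_MAX)   /* region touches the top */
            --size;                           /* keep start + size from wrapping */

        printf("start + size = %#x\n", start + size);  /* 0xffffffff */
        return 0;
    }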

@@ -513,7 +513,7 @@ static void cps_cpu_die(unsigned int cpu)
          * in which case the CPC will refuse to power down the core.
          */
         do {
-            mips_cm_lock_other(core, vpe_id);
+            mips_cm_lock_other(core, 0);
             mips_cpc_lock_other(core);
             stat = read_cpc_co_stat_conf();
             stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;

@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
     cpumask_set_cpu(cpu, &cpu_coherent_mask);
     notify_cpu_starting(cpu);

+    cpumask_set_cpu(cpu, &cpu_callin_map);
+    synchronise_count_slave(cpu);
+
     set_cpu_online(cpu, true);

     set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)

     calculate_cpu_foreign_map();

-    cpumask_set_cpu(cpu, &cpu_callin_map);
-    synchronise_count_slave(cpu);
-
     /*
      * irq will be enabled in ->smp_finish(), enabling it too early
      * is dangerous.

@@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 {
     struct uprobe_task *utask = current->utask;
-    union mips_instruction insn;

     /*
      * Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
         unsigned long epc;

         epc = regs->cp0_epc;
-        __compute_return_epc_for_insn(regs, insn);
+        __compute_return_epc_for_insn(regs,
+            (union mips_instruction) aup->insn[0]);
         aup->resume_epc = regs->cp0_epc;
     }
     utask->autask.saved_trap_nr = current->thread.trap_nr;
     current->thread.trap_nr = UPROBE_TRAP_NR;
     regs->cp0_epc = current->utask->xol_vaddr;
@@ -222,7 +221,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
         return NOTIFY_DONE;

     switch (val) {
-    case DIE_BREAK:
+    case DIE_UPROBE:
         if (uprobe_pre_sstep_notifier(regs))
             return NOTIFY_STOP;
         break;
@@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
     ra = regs->regs[31];

     /* Replace the return address with the trampoline address */
-    regs->regs[31] = ra;
+    regs->regs[31] = trampoline_vaddr;

     return ra;
 }
@@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
     return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }

-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
-            unsigned long vaddr)
-{
-    return uprobe_write_opcode(mm, vaddr,
-            *(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                   void *src, unsigned long len)
 {

@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
     unsigned long num_pages, i;
+    unsigned long data_pfn;

     BUG_ON(!PAGE_ALIGNED(image->data));
     BUG_ON(!PAGE_ALIGNED(image->size));

     num_pages = image->size / PAGE_SIZE;

-    for (i = 0; i < num_pages; i++) {
-        image->mapping.pages[i] =
-            virt_to_page(image->data + (i * PAGE_SIZE));
-    }
+    data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+    for (i = 0; i < num_pages; i++)
+        image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }

 static int __init init_vdso(void)

@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
     /* Set EPC to return to post-branch instruction */
     xcp->cp0_epc = current->thread.bd_emu_cont_pc;
     pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+    MIPS_FPU_EMU_INC_STATS(ds_emul);
     return true;
 }

@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
      * If address-based cache ops don't require an SMP call, then
      * use them exclusively for small flushes.
      */
-    size = start - end;
+    size = end - start;
     cache_size = icache_size;
     if (!cpu_has_ic_fills_f_dc) {
         size *= 2;

@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
     struct maar_config cfg[BOOT_MEM_MAP_MAX];
     unsigned i, num_configured, num_cfg = 0;
-    phys_addr_t skip;

     for (i = 0; i < boot_mem_map.nr_map; i++) {
         switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
             continue;
         }

-        skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+        /* Round lower up */
         cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-        cfg[num_cfg].lower += skip;
+        cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

-        cfg[num_cfg].upper = cfg[num_cfg].lower;
-        cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-        cfg[num_cfg].upper -= skip;
+        /* Round upper down */
+        cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+                    boot_mem_map.map[i].size;
+        cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

         cfg[num_cfg].attrs = MIPS_MAAR_S;
         num_cfg++;
@@ -441,6 +440,9 @@ static inline void mem_init_free_highmem(void)
 #ifdef CONFIG_HIGHMEM
     unsigned long tmp;

+    if (cpu_has_dc_aliases)
+        return;
+
     for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
         struct page *page = pfn_to_page(tmp);
View file

@ -39,6 +39,9 @@
#include <linux/console.h> #include <linux/console.h>
#endif #endif
#define ROCIT_CONFIG_GEN0 0x1f403000
#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
extern void malta_be_init(void); extern void malta_be_init(void);
extern int malta_be_handler(struct pt_regs *regs, int is_fixup); extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
@ -107,6 +110,8 @@ static void __init fd_activate(void)
static int __init plat_enable_iocoherency(void) static int __init plat_enable_iocoherency(void)
{ {
int supported = 0; int supported = 0;
u32 cfg;
if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
} else if (mips_cm_numiocu() != 0) { } else if (mips_cm_numiocu() != 0) {
/* Nothing special needs to be done to enable coherency */ /* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n"); pr_info("CMP IOCU detected\n");
if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
return 0; return 0;
} }

@@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
         r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }

+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+    unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+    return (resource_flags & flags) == flags;
+}
+
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 {
     phb->ioda.pe_array[pe_no].phb = phb;
@@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
         res = &pdev->resource[i + PCI_IOV_RESOURCES];
         if (!res->flags || res->parent)
             continue;
-        if (!pnv_pci_is_m64(phb, res)) {
+        if (!pnv_pci_is_m64_flags(res->flags)) {
             dev_warn(&pdev->dev, "Don't support SR-IOV with"
                     " non M64 VF BAR%d: %pR. \n",
                  i, res);
@@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
      * alignment for any 64-bit resource, PCIe doesn't care and
      * bridges only do 64-bit prefetchable anyway.
      */
-    if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
+    if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
         return phb->ioda.m64_segsize;
     if (type & IORESOURCE_MEM)
         return phb->ioda.m32_segsize;

@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)    \
 "   movco.l %0, @%3     \n"                                \
 "   bf      1b          \n"                                \
 "   synco               \n"                                \
-    : "=&z" (temp), "=&z" (res)                            \
+    : "=&z" (temp), "=&r" (res)                            \
     : "r" (i), "r" (&v->counter)                           \
     : "t");                                                \
                                                            \

@@ -25,6 +25,7 @@
 #define HPAGE_MASK          (~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE    (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif

 #ifndef __ASSEMBLY__
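Note: with the usual sparc64 shifts (HPAGE_SHIFT = 23 for the 8MB huge page, REAL_HPAGE_SHIFT = 22 for the 4MB hardware mapping — an assumption, the hunk does not show them) the macro evaluates to 2, i.e. each huge page consumes two real TLB/TSB entries, which is why the fault and TSB paths later in this merge multiply their counts by it:

    /* sketch of the macro arithmetic with the assumed shifts */
    #define HPAGE_SHIFT          23   /* 8MB virtual huge page  */
    #define REAL_HPAGE_SHIFT     22   /* 4MB hardware TLB entry */
    #define REAL_HPAGE_PER_HPAGE (1UL << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
    /* REAL_HPAGE_PER_HPAGE == 2 */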

@@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)

+void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);

@@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
+#define smp_fill_in_cpu_possible_map() do { } while (0)

 #endif /* !(CONFIG_SMP) */

@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/start_kernel.h>
+#include <linux/bootmem.h>

 #include <asm/io.h>
 #include <asm/processor.h>
@@ -50,6 +51,8 @@
 #include <asm/elf.h>
 #include <asm/mdesc.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>

 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -590,6 +593,22 @@ static void __init init_sparc64_elf_hwcap(void)
         pause_patch();
 }

+void __init alloc_irqstack_bootmem(void)
+{
+    unsigned int i, node;
+
+    for_each_possible_cpu(i) {
+        node = cpu_to_node(i);
+
+        softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                    THREAD_SIZE,
+                    THREAD_SIZE, 0);
+        hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+                    THREAD_SIZE,
+                    THREAD_SIZE, 0);
+    }
+}
+
 void __init setup_arch(char **cmdline_p)
 {
     /* Initialize PROM console and command line. */
@@ -650,6 +669,13 @@ void __init setup_arch(char **cmdline_p)

     paging_init();
     init_sparc64_elf_hwcap();
+    smp_fill_in_cpu_possible_map();
+
+    /*
+     * Once the OF device tree and MDESC have been setup and nr_cpus has
+     * been parsed, we know the list of possible cpus. Therefore we can
+     * allocate the IRQ stacks.
+     */
+    alloc_irqstack_bootmem();
 }

 extern int stop_a_enabled;

@@ -1227,6 +1227,20 @@ void __init smp_setup_processor_id(void)
         xcall_deliver_impl = hypervisor_xcall_deliver;
 }

+void __init smp_fill_in_cpu_possible_map(void)
+{
+    int possible_cpus = num_possible_cpus();
+    int i;
+
+    if (possible_cpus > nr_cpu_ids)
+        possible_cpus = nr_cpu_ids;
+
+    for (i = 0; i < possible_cpus; i++)
+        set_cpu_possible(i, true);
+    for (; i < NR_CPUS; i++)
+        set_cpu_possible(i, false);
+}
+
 void smp_fill_in_sib_core_maps(void)
 {
     unsigned int i;

@@ -484,6 +484,7 @@ good_area:
     tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
     mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+    mm_rss *= REAL_HPAGE_PER_HPAGE;
     if (unlikely(mm_rss >
              mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
         if (mm->context.tsb_block[MM_TSB_HUGE].tsb)

@@ -1160,7 +1160,7 @@ int __node_distance(int from, int to)
     return numa_latency[from][to];
 }

-static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
     int i;

@@ -1173,8 +1173,8 @@ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
     return i;
 }

-static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
-                      int index)
+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
+                         u64 grp, int index)
 {
     u64 arc;

@@ -2081,7 +2081,6 @@ void __init paging_init(void)
 {
     unsigned long end_pfn, shift, phys_base;
     unsigned long real_end, i;
-    int node;

     setup_page_offset();

@@ -2250,21 +2249,6 @@ void __init paging_init(void)
     /* Setup bootmem... */
     last_valid_pfn = end_pfn = bootmem_init(phys_base);

-    /* Once the OF device tree and MDESC have been setup, we know
-     * the list of possible cpus.  Therefore we can allocate the
-     * IRQ stacks.
-     */
-    for_each_possible_cpu(i) {
-        node = cpu_to_node(i);
-
-        softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                            THREAD_SIZE,
-                            THREAD_SIZE, 0);
-        hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-                            THREAD_SIZE,
-                            THREAD_SIZE, 0);
-    }
-
     kernel_physical_mapping_init();

     {

@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
         return;

     if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-        if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-            mm->context.thp_pte_count++;
-        else
-            mm->context.thp_pte_count--;
+        /*
+         * Note that this routine only sets pmds for THP pages.
+         * Hugetlb pages are handled elsewhere.  We need to check
+         * for huge zero page.  Huge zero pages are like hugetlb
+         * pages in that there is no RSS, but there is the need
+         * for TSB entries.  So, huge zero page counts go into
+         * hugetlb_pte_count.
+         */
+        if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+            if (is_huge_zero_page(pmd_page(pmd)))
+                mm->context.hugetlb_pte_count++;
+            else
+                mm->context.thp_pte_count++;
+        } else {
+            if (is_huge_zero_page(pmd_page(orig)))
+                mm->context.hugetlb_pte_count--;
+            else
+                mm->context.thp_pte_count--;
+        }

         /* Do not try to allocate the TSB hash table if we
          * don't have one already.  We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
     }
 }

+/*
+ * This routine is only called when splitting a THP
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
              pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,

     set_pmd_at(vma->vm_mm, address, pmdp, entry);
     flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+    /*
+     * set_pmd_at() will not be called in a way to decrement
+     * thp_pte_count when splitting a THP, so do it now.
+     * Sanity check pmd before doing the actual decrement.
+     */
+    if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+        !is_huge_zero_page(pmd_page(entry)))
+        (vma->vm_mm)->context.thp_pte_count--;
 }

 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,

@@ -469,8 +469,10 @@ retry_tsb_alloc:

 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+    unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-    unsigned long total_huge_pte_count;
+    unsigned long saved_hugetlb_pte_count;
+    unsigned long saved_thp_pte_count;
 #endif
     unsigned int i;

@@ -483,10 +485,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
      * will re-increment the counters as the parent PTEs are
      * copied into the child address space.
      */
-    total_huge_pte_count = mm->context.hugetlb_pte_count +
-             mm->context.thp_pte_count;
+    saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+    saved_thp_pte_count = mm->context.thp_pte_count;
     mm->context.hugetlb_pte_count = 0;
     mm->context.thp_pte_count = 0;
+
+    mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif

     /* copy_mm() copies over the parent's mm_struct before calling
@@ -499,11 +503,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
     /* If this is fork, inherit the parent's TSB size.  We would
      * grow it to that size on the first page fault anyways.
      */
-    tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+    tsb_grow(mm, MM_TSB_BASE, mm_rss);

 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-    if (unlikely(total_huge_pte_count))
-        tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+    if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+        tsb_grow(mm, MM_TSB_HUGE,
+             (saved_hugetlb_pte_count + saved_thp_pte_count) *
+             REAL_HPAGE_PER_HPAGE);
 #endif

     if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))

@@ -1002,7 +1002,6 @@ ENTRY(error_entry)
     testb   $3, CS+8(%rsp)
     jz      .Lerror_kernelspace

-.Lerror_entry_from_usermode_swapgs:
     /*
      * We entered from user mode or we're pretending to have entered
      * from user mode due to an IRET fault.
@@ -1045,7 +1044,8 @@ ENTRY(error_entry)
      * gsbase and proceed.  We'll fix up the exception and land in
      * .Lgs_change's error handler with kernel gsbase.
      */
-    jmp     .Lerror_entry_from_usermode_swapgs
+    SWAPGS
+    jmp     .Lerror_entry_done

 .Lbstep_iret:
     /* Fix truncated RIP */

@@ -22,7 +22,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
     ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));

-    if (hdr->e_type != ET_DYN)
+    if (GET_LE(&hdr->e_type) != ET_DYN)
         fail("input is not a shared object\n");

     /* Walk the segment table. */

@@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
      * The only surefire way of knowing if this NMI is ours is by checking
      * the write ptr against the PMI threshold.
      */
-    if (ds->bts_index >= ds->bts_interrupt_threshold)
+    if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
         handled = 1;

     /*
@@ -584,7 +584,8 @@ static __init int bts_init(void)
     if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
         return -ENODEV;

-    bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+    bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
+                  PERF_PMU_CAP_EXCLUSIVE;
     bts_pmu.task_ctx_nr     = perf_sw_context;
     bts_pmu.event_init      = bts_event_init;
     bts_pmu.add             = bts_event_add;

@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-    this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+    this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
 }

 /* Set in this cpu's CR4. */

@@ -804,21 +804,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
     identify_cpu_without_cpuid(c);

     /* cyrix could have cpuid enabled via c_identify()*/
-    if (!have_cpuid_p())
-        return;
+    if (have_cpuid_p()) {
+        cpu_detect(c);
+        get_cpu_vendor(c);
+        get_cpu_cap(c);

-    cpu_detect(c);
-    get_cpu_vendor(c);
-    get_cpu_cap(c);
+        if (this_cpu->c_early_init)
+            this_cpu->c_early_init(c);

-    if (this_cpu->c_early_init)
-        this_cpu->c_early_init(c);
+        c->cpu_index = 0;
+        filter_cpuid_features(c, false);

-    c->cpu_index = 0;
-    filter_cpuid_features(c, false);
-
-    if (this_cpu->c_bsp_init)
-        this_cpu->c_bsp_init(c);
+        if (this_cpu->c_bsp_init)
+            this_cpu->c_bsp_init(c);
+    }

     setup_force_cpu_cap(X86_FEATURE_ALWAYS);

     fpu__init_system(c);

@@ -1137,9 +1137,7 @@ void __init setup_arch(char **cmdline_p)
      * auditing all the early-boot CR4 manipulation would be needed to
      * rule it out.
      */
-    if (boot_cpu_data.cpuid_level >= 0)
-        /* A CPU has %cr4 if and only if it has CPUID. */
-        mmu_cr4_features = __read_cr4();
+    mmu_cr4_features = __read_cr4_safe();

     memblock_set_current_limit(get_max_mapped());

@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
     }
 }

-static int populate_pmd(struct cpa_data *cpa,
-            unsigned long start, unsigned long end,
-            unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+             unsigned long start, unsigned long end,
+             unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-    unsigned int cur_pages = 0;
+    long cur_pages = 0;
     pmd_t *pmd;
     pgprot_t pmd_pgprot;

@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
     return num_pages;
 }

-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-            pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+             pgprot_t pgprot)
 {
     pud_t *pud;
     unsigned long end;
-    int cur_pages = 0;
+    long cur_pages = 0;
     pgprot_t pud_pgprot;

     end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,

     /* Map trailing leftover */
     if (start < end) {
-        int tmp;
+        long tmp;

         pud = pud_offset(pgd, start);
         if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
     pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
     pud_t *pud = NULL;  /* shut up gcc */
     pgd_t *pgd_entry;
-    int ret;
+    long ret;

     pgd_entry = cpa->pgd + pgd_index(addr);

@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)

 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-    int ret, numpages = cpa->numpages;
+    unsigned long numpages = cpa->numpages;
+    int ret;

     while (numpages) {
         /*

@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
      * text and allocate a new stack because we can't rely on the
      * stack pointer being < 4GB.
      */
-    if (!IS_ENABLED(CONFIG_EFI_MIXED))
+    if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
         return 0;

     /*

@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
     if (ret)
         return ERR_PTR(ret);

+    /*
+     * Check if the hardware context is actually mapped to anything.
+     * If not tell the caller that it should skip this queue.
+     */
     hctx = q->queue_hw_ctx[hctx_idx];
+    if (!blk_mq_hw_queue_mapped(hctx)) {
+        ret = -EXDEV;
+        goto out_queue_exit;
+    }
     ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

     blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
     rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
     if (!rq) {
-        blk_queue_exit(q);
-        return ERR_PTR(-EWOULDBLOCK);
+        ret = -EWOULDBLOCK;
+        goto out_queue_exit;
     }

     return rq;
+
+out_queue_exit:
+    blk_queue_exit(q);
+    return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
     /*
      * If previous slice expired, start a new one otherwise renew/extend
      * existing slice to make sure it is at least throtl_slice interval
-     * long since now.
+     * long since now. New slice is started only for empty throttle group.
+     * If there is queued bio, that means there should be an active
+     * slice and it should be extended instead.
      */
-    if (throtl_slice_used(tg, rw))
+    if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
         throtl_start_new_slice(tg, rw);
     else {
         if (time_before(tg->slice_end[rw], jiffies + throtl_slice))

@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
     struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
     struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
     struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+    unsigned int dst_len;
     unsigned int pos;
+    u8 *out_buf;

-    if (err == -EOVERFLOW)
-        /* Decrypted value had no leading 0 byte */
-        err = -EINVAL;
     if (err)
         goto done;

-    if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-        err = -EINVAL;
+    err = -EINVAL;
+    dst_len = req_ctx->child_req.dst_len;
+    if (dst_len < ctx->key_size - 1)
         goto done;
+
+    out_buf = req_ctx->out_buf;
+    if (dst_len == ctx->key_size) {
+        if (out_buf[0] != 0x00)
+            /* Decrypted value had no leading 0 byte */
+            goto done;
+
+        dst_len--;
+        out_buf++;
     }

-    if (req_ctx->out_buf[0] != 0x02) {
-        err = -EINVAL;
+    if (out_buf[0] != 0x02)
         goto done;
-    }
-    for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-        if (req_ctx->out_buf[pos] == 0x00)
+
+    for (pos = 1; pos < dst_len; pos++)
+        if (out_buf[pos] == 0x00)
             break;
-    if (pos < 9 || pos == req_ctx->child_req.dst_len) {
-        err = -EINVAL;
+    if (pos < 9 || pos == dst_len)
         goto done;
-    }
     pos++;

-    if (req->dst_len < req_ctx->child_req.dst_len - pos)
+    err = 0;
+
+    if (req->dst_len < dst_len - pos)
         err = -EOVERFLOW;
-    req->dst_len = req_ctx->child_req.dst_len - pos;
+    req->dst_len = dst_len - pos;

     if (!err)
         sg_copy_from_buffer(req->dst,
                     sg_nents_for_len(req->dst, req->dst_len),
-                    req_ctx->out_buf + pos, req->dst_len);
+                    out_buf + pos, req->dst_len);

 done:
     kzfree(req_ctx->out_buf);
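Note: the parse follows the PKCS#1 v1.5 encryption block EM = 0x00 || 0x02 || PS || 0x00 || M, where PS is at least eight nonzero padding bytes; the dst_len/out_buf dance only exists to tolerate an RSA primitive that may or may not strip the leading zero byte. A compact sketch of the same check over a full block, not the kernel implementation:

    #include <stddef.h>

    static int unpad_v15(const unsigned char *em, size_t len,
                         const unsigned char **msg, size_t *msg_len)
    {
        size_t pos;

        if (len < 11 || em[0] != 0x00 || em[1] != 0x02)
            return -1;
        for (pos = 2; pos < len && em[pos] != 0x00; pos++)
            ;                           /* scan past the PS bytes */
        if (pos < 10 || pos == len)     /* PS < 8 bytes, or no 0x00 found */
            return -1;
        *msg = em + pos + 1;
        *msg_len = len - pos - 1;
        return 0;
    }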

@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
     return to_acpi_device(acpi_desc->dev);
 }

-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
     struct nd_cmd_clear_error *clear_err;
     struct nd_cmd_ars_status *ars_status;
-    struct nd_cmd_ars_start *ars_start;
-    struct nd_cmd_ars_cap *ars_cap;
     u16 flags;

     switch (cmd) {
     case ND_CMD_ARS_CAP:
-        ars_cap = buf;
-        if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+        if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
             return -ENOTTY;

         /* Command failed */
-        if (ars_cap->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;

         /* No supported scan types for this range */
         flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-        if ((ars_cap->status >> 16 & flags) == 0)
+        if ((status >> 16 & flags) == 0)
             return -ENOTTY;
         break;
     case ND_CMD_ARS_START:
-        ars_start = buf;
         /* ARS is in progress */
-        if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+        if ((status & 0xffff) == NFIT_ARS_START_BUSY)
             return -EBUSY;

         /* Command failed */
-        if (ars_start->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;
         break;
     case ND_CMD_ARS_STATUS:
         ars_status = buf;
         /* Command failed */
-        if (ars_status->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;
         /* Check extended status (Upper two bytes) */
-        if (ars_status->status == NFIT_ARS_STATUS_DONE)
+        if (status == NFIT_ARS_STATUS_DONE)
             return 0;

         /* ARS is in progress */
-        if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+        if (status == NFIT_ARS_STATUS_BUSY)
             return -EBUSY;

         /* No ARS performed for the current boot */
-        if (ars_status->status == NFIT_ARS_STATUS_NONE)
+        if (status == NFIT_ARS_STATUS_NONE)
             return -EAGAIN;

         /*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
          * agent wants the scan to stop.  If we didn't overflow
          * then just continue with the returned results.
          */
-        if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+        if (status == NFIT_ARS_STATUS_INTR) {
             if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
                 return -ENOSPC;
             return 0;
         }

         /* Unknown status */
-        if (ars_status->status >> 16)
+        if (status >> 16)
             return -EIO;
         break;
     case ND_CMD_CLEAR_ERROR:
         clear_err = buf;
         /* Command failed */
-        if (clear_err->status & 0xffff)
+        if (status & 0xffff)
             return -EIO;
         if (!clear_err->cleared)
             return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
         break;
     }

+    /* all other non-zero status results in an error */
+    if (status)
+        return -EIO;
     return 0;
 }

@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
     struct nd_cmd_pkg *call_pkg = NULL;
     const char *cmd_name, *dimm_name;
     unsigned long cmd_mask, dsm_mask;
+    u32 offset, fw_status = 0;
     acpi_handle handle;
     unsigned int func;
     const u8 *uuid;
-    u32 offset;
     int rc, i;

     func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                 out_obj->buffer.pointer + offset, out_size);
         offset += out_size;
     }
+
+    /*
+     * Set fw_status for all the commands with a known format to be
+     * later interpreted by xlat_status().
+     */
+    if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+            || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+        fw_status = *(u32 *) out_obj->buffer.pointer;
+
     if (offset + in_buf.buffer.length < buf_len) {
         if (i >= 1) {
             /*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
              */
             rc = buf_len - offset - in_buf.buffer.length;
             if (cmd_rc)
-                *cmd_rc = xlat_status(buf, cmd);
+                *cmd_rc = xlat_status(buf, cmd, fw_status);
         } else {
             dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
                     __func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
     } else {
         rc = 0;
         if (cmd_rc)
-            *cmd_rc = xlat_status(buf, cmd);
+            *cmd_rc = xlat_status(buf, cmd, fw_status);
     }

 out:

View file

@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
-		regcache_drop_region(map, reg, reg + 1);
+		/* regcache_drop_region() takes lock that we already have,
+		 * thus call map->cache_ops->drop() directly
+		 */
+		if (map->cache_ops && map->cache_ops->drop)
+			map->cache_ops->drop(map, reg, reg + 1);
 	}

 	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

View file

@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: finishing device.\n");
 	adev->shutdown = true;
+	drm_crtc_force_disable_all(adev->ddev);
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
-	drm_crtc_force_disable_all(adev->ddev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
 	kfree(adev->ip_block_status);

View file

@@ -175,6 +175,7 @@ struct nvkm_device_func {
 	void (*fini)(struct nvkm_device *, bool suspend);
 	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
 	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
 };

 struct nvkm_device_quirk {

View file

@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;

-	nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
+		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

 	nvbo->page_shift = 12;
 	if (drm->client.vm) {

View file

@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };

 int

View file

@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
 	.fini = nvkm_device_tegra_fini,
 	.resource_addr = nvkm_device_tegra_resource_addr,
 	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
 };

 int

View file

@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 {
 	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
 	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
 	nvkm_ramht_remove(imem->ramht, cookie);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
 }

 static int

View file

@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	if (rdev->pdev->device == 0x6811 &&
 	    rdev->pdev->revision == 0x81)
 		max_mclk = 120000;
+	/* limit sclk/mclk on Jet parts for stability */
+	if (rdev->pdev->device == 0x6665 &&
+	    rdev->pdev->revision == 0xc3) {
+		max_sclk = 75000;
+		max_mclk = 80000;
+	}

 	if (rps->vce_active) {
 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;

View file

@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		return 0;
 	cmd = urb->transfer_buffer;

-	for (i = y; i < height ; i++) {
+	for (i = y; i < y + height ; i++) {
 		const int line_offset = fb->base.pitches[0] * i;
 		const int byte_offset = line_offset + (x * bpp);
 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);

View file

@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 	/* Set the number of I2C channel instance */
 	adap_info->ch_num = id->driver_data;

-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-		  KBUILD_MODNAME, adap_info);
-	if (ret) {
-		pch_pci_err(pdev, "request_irq FAILED\n");
-		goto err_request_irq;
-	}
-
 	for (i = 0; i < adap_info->ch_num; i++) {
 		pch_adap = &adap_info->pch_data[i].pch_adapter;
 		adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 		pch_adap->dev.of_node = pdev->dev.of_node;
 		pch_adap->dev.parent = &pdev->dev;
+	}
+
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+			  KBUILD_MODNAME, adap_info);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
+	for (i = 0; i < adap_info->ch_num; i++) {
+		pch_adap = &adap_info->pch_data[i].pch_adapter;

 		pch_i2c_init(&adap_info->pch_data[i]);

View file

@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-	qup_i2c_pm_suspend_runtime(device);
+	if (!pm_runtime_suspended(device))
+		return qup_i2c_pm_suspend_runtime(device);
 	return 0;
 }

View file

@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
 	/* Only select the channel if its different from the last channel */
 	if (data->last_chan != regval) {
 		ret = pca954x_reg_write(muxc->parent, client, regval);
-		data->last_chan = regval;
+		data->last_chan = ret ? 0 : regval;
 	}

 	return ret;

View file

@@ -946,6 +946,12 @@ static const struct input_device_id joydev_ids[] = {
 		.evbit = { BIT_MASK(EV_ABS) },
 		.absbit = { BIT_MASK(ABS_X) },
 	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+				INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { BIT_MASK(ABS_Z) },
+	},
 	{
 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
 				INPUT_DEVICE_ID_MATCH_ABSBIT,

View file

@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
 		data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
 	}

-	error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+	error = device_property_read_string(dev, "firmware-name", &str);
 	if (!error)
-		snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s", str);
 	else
 		dev_dbg(dev, "Firmware file name read error. Using default.");
 }
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 		if (!acpi_id)
 			return -ENODEV;

-		snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-			 acpi_id->id);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s.fw", acpi_id->id);

 		for (i = 0; i < strlen(data->fw_name); i++)
 			data->fw_name[i] = tolower(data->fw_name[i]);
 	} else {
-		snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-			 id->name);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s.fw", id->name);
 	}

 	return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 					 const struct i2c_device_id *id)
 {
-	snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+	snprintf(data->fw_name, sizeof(data->fw_name),
+		 "silead/%s.fw", id->name);
 	return 0;
 }
 #endif

View file

@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
-	int cpu = *base_cpu;
+	int next_cpu, cpu = *base_cpu;
 	unsigned long mpidr = cpu_logical_map(cpu);
 	u16 tlist = 0;
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 		tlist |= 1 << (mpidr & 0xf);

-		cpu = cpumask_next(cpu, mask);
-		if (cpu >= nr_cpu_ids)
+		next_cpu = cpumask_next(cpu, mask);
+		if (next_cpu >= nr_cpu_ids)
 			goto out;
+		cpu = next_cpu;

 		mpidr = cpu_logical_map(cpu);

View file

@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	if (!gic_local_irq_is_routable(intr))
 		return -EPERM;

-	/*
-	 * HACK: These are all really percpu interrupts, but the rest
-	 * of the MIPS kernel code does not use the percpu IRQ API for
-	 * the CP0 timer and performance counter interrupts.
-	 */
-	switch (intr) {
-	case GIC_LOCAL_INT_TIMER:
-	case GIC_LOCAL_INT_PERFCTR:
-	case GIC_LOCAL_INT_FDC:
-		irq_set_chip_and_handler(virq,
-					 &gic_all_vpes_local_irq_controller,
-					 handle_percpu_irq);
-		break;
-	default:
-		irq_set_chip_and_handler(virq,
-					 &gic_local_irq_controller,
-					 handle_percpu_devid_irq);
-		irq_set_percpu_devid(virq);
-		break;
-	}
-
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	return 0;
 }

-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
-			      irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+			      unsigned int hwirq)
 {
-	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
-		return gic_local_irq_domain_map(d, virq, hw);
+	struct irq_chip *chip;
+	int err;

-	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-				 handle_level_irq);
+	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    &gic_level_irq_controller,
+						    NULL);
+	} else {
+		switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+		case GIC_LOCAL_INT_TIMER:
+		case GIC_LOCAL_INT_PERFCTR:
+		case GIC_LOCAL_INT_FDC:
+			/*
+			 * HACK: These are all really percpu interrupts, but
+			 * the rest of the MIPS kernel code does not use the
+			 * percpu IRQ API for them.
+			 */
+			chip = &gic_all_vpes_local_irq_controller;
+			irq_set_handler(virq, handle_percpu_irq);
+			break;

-	return gic_shared_irq_domain_map(d, virq, hw, 0);
+		default:
+			chip = &gic_local_irq_controller;
+			irq_set_handler(virq, handle_percpu_devid_irq);
+			irq_set_percpu_devid(virq);
+			break;
+		}
+
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    chip, NULL);
+	}
+
+	return err;
 }

 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 	int cpu, ret, i;

 	if (spec->type == GIC_DEVICE) {
-		/* verify that it doesn't conflict with an IPI irq */
-		if (test_bit(spec->hwirq, ipi_resrv))
+		/* verify that shared irqs don't conflict with an IPI irq */
+		if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+		    test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
 			return -EBUSY;

-		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
-		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
-						     &gic_level_irq_controller,
-						     NULL);
+		return gic_setup_dev_chip(d, virq, spec->hwirq);
 	} else {
 		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
 		if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
 }

 static const struct irq_domain_ops gic_irq_domain_ops = {
-	.map = gic_irq_domain_map,
 	.alloc = gic_irq_domain_alloc,
 	.free = gic_irq_domain_free,
 	.match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
 	struct irq_fwspec *fwspec = arg;
 	struct gic_irq_spec spec = {
 		.type = GIC_DEVICE,
-		.hwirq = fwspec->param[1],
 	};
 	int i, ret;
-	bool is_shared = fwspec->param[0] == GIC_SHARED;

-	if (is_shared) {
-		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < nr_irqs; i++) {
-		irq_hw_number_t hwirq;
+	if (fwspec->param[0] == GIC_SHARED)
+		spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+	else
+		spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

-		if (is_shared)
-			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
-		else
-			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+	if (ret)
+		return ret;

-		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
-						    hwirq,
-						    &gic_level_irq_controller,
-						    NULL);
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
 		if (ret)
 			goto error;
 	}
@@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
 static void gic_dev_domain_activate(struct irq_domain *domain,
 				    struct irq_data *d)
 {
-	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+	if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+		gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+	else
+		gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
 }

 static struct irq_domain_ops gic_dev_domain_ops = {

View file

@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

-	dev_info(&slot->mmc->class_dev,
-		 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
-		 slot->id, host->bus_hz, clock,
-		 div ? ((host->bus_hz / div) >> 1) :
-		 host->bus_hz, div);
+	if (clock != slot->__clk_old || force_clkinit)
+		dev_info(&slot->mmc->class_dev,
+			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+			 slot->id, host->bus_hz, clock,
+			 div ? ((host->bus_hz / div) >> 1) :
+			 host->bus_hz, div);

 	/* disable clock */
 	mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 		/* inform CIU */
 		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+		/* keep the last clock value that was requested from core */
+		slot->__clk_old = clock;
 	}

 	host->current_speed = clock;

View file

@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
  * @queue_node: List node for placing this node in the @queue list of
  *	&struct dw_mci.
  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ *	Keeping track of this helps us to avoid spamming the console.
  * @flags: Random state bits associated with the slot.
  * @id: Number of this slot.
  * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
 	struct list_head	queue_node;

 	unsigned int		clock;
+	unsigned int		__clk_old;

 	unsigned long		flags;
 #define DW_MMC_CARD_PRESENT	0

View file

@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
 	unsigned long flags;
 	u32 val;

+	/* Reset ECC hardware */
+	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
 	spin_lock_irqsave(&davinci_nand_lock, flags);

 	/* Start 4-bit ECC calculation for read/write */

View file

@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 		   u8 *data, u32 bytes)
 {
 	dma_addr_t addr;
-	u32 *p, len, i;
+	u8 *p;
+	u32 len, i, val;
 	int ret = 0;

 	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
 	len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-	p = (u32 *)(data + bytes);
+	p = data + bytes;

 	/* write the parity bytes generated by the ECC back to the OOB region */
-	for (i = 0; i < len; i++)
-		p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+	for (i = 0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+		p[i] = (val >> ((i % 4) * 8)) & 0xff;
+	}
 timeout:

 	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);

View file

@@ -93,6 +93,9 @@
 #define		NFI_FSM_MASK		(0xf << 16)
 #define NFI_ADDRCNTR		(0x70)
 #define		CNTR_MASK		GENMASK(16, 12)
+#define		ADDRCNTR_SEC_SHIFT	(12)
+#define		ADDRCNTR_SEC(val) \
+		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
 #define NFI_STRADDR		(0x80)
 #define NFI_BYTELEN		(0x84)
 #define NFI_CSEL		(0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	}

 	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-					(reg & CNTR_MASK) >= chip->ecc.steps,
+					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
 					10, MTK_TIMEOUT);
 	if (ret)
 		dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 		dev_warn(nfc->dev, "read ahb/dma done timeout\n");

 	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-				       (reg & CNTR_MASK) >= sectors, 10,
+				       ADDRCNTR_SEC(reg) >= sectors, 10,
 				       MTK_TIMEOUT);
 	if (rc < 0) {
 		dev_err(nfc->dev, "subpage done timeout\n");

View file

@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
 	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;

-	if (section > nand_chip->ecc.steps)
+	if (section >= nand_chip->ecc.steps)
 		return -ERANGE;

 	if (!section) {

View file

@@ -2169,7 +2169,7 @@ scan_tail:
 	return 0;

 return_error:
-	if (info->dma)
+	if (!IS_ERR_OR_NULL(info->dma))
 		dma_release_channel(info->dma);
 	if (nand_chip->ecc.priv) {
 		nand_bch_free(nand_chip->ecc.priv);

View file

@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/dev.h>
 #include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
 /*
  * CAN device restart for bus-off recovery
  */
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *)data;
 	struct can_priv *priv = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
 	struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
 		netdev_err(dev, "Error %d during restart", err);
 }

+static void can_restart_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+	can_restart(priv->dev);
+}
+
 int can_restart_now(struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
 	if (priv->state != CAN_STATE_BUS_OFF)
 		return -EBUSY;

-	/* Runs as soon as possible in the timer context */
-	mod_timer(&priv->restart_timer, jiffies);
+	cancel_delayed_work_sync(&priv->restart_work);
+	can_restart(dev);

 	return 0;
 }
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
 	netif_carrier_off(dev);

 	if (priv->restart_ms)
-		mod_timer(&priv->restart_timer,
-			  jiffies + (priv->restart_ms * HZ) / 1000);
+		schedule_delayed_work(&priv->restart_work,
+				      msecs_to_jiffies(priv->restart_ms));
 }
 EXPORT_SYMBOL_GPL(can_bus_off);
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 		return NULL;

 	priv = netdev_priv(dev);
+	priv->dev = dev;

 	if (echo_skb_max) {
 		priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 	priv->state = CAN_STATE_STOPPED;

-	init_timer(&priv->restart_timer);
+	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);

 	return dev;
 }
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
 	if (!netif_carrier_ok(dev))
 		netif_carrier_on(dev);

-	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);

-	del_timer_sync(&priv->restart_timer);
+	cancel_delayed_work_sync(&priv->restart_work);
 	can_flush_echo_skb(dev);
 }
 EXPORT_SYMBOL_GPL(close_candev);
EXPORT_SYMBOL_GPL(close_candev); EXPORT_SYMBOL_GPL(close_candev);

View file

@@ -18134,14 +18134,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 	rtnl_lock();

+	/* We needn't recover from permanent error */
+	if (state == pci_channel_io_frozen)
+		tp->pcierr_recovery = true;
+
 	/* We probably don't have netdev yet */
 	if (!netdev || !netif_running(netdev))
 		goto done;

-	/* We needn't recover from permanent error */
-	if (state == pci_channel_io_frozen)
-		tp->pcierr_recovery = true;
-
 	tg3_phy_stop(tp);
 	tg3_netif_stop(tp);
@@ -18238,7 +18238,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 	rtnl_lock();

-	if (!netif_running(netdev))
+	if (!netdev || !netif_running(netdev))
 		goto done;

 	tg3_full_lock(tp, 0);

View file

@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
 		.driver_data = 0,
 	}, {
 		.name = "imx25-fec",
-		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+		.driver_data = FEC_QUIRK_USE_GASKET,
 	}, {
 		.name = "imx27-fec",
-		.driver_data = FEC_QUIRK_HAS_RACC,
+		.driver_data = 0,
 	}, {
 		.name = "imx28-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 /* FEC receive acceleration */
 #define FEC_RACC_IPDIS		(1 << 1)
 #define FEC_RACC_PRODIS		(1 << 2)
+#define FEC_RACC_SHIFT16	BIT(7)
 #define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

 /*
@@ -945,9 +946,11 @@ fec_restart(struct net_device *ndev)
 #if !defined(CONFIG_M5272)
 	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
-		/* set RX checksum */
 		val = readl(fep->hwp + FEC_RACC);
+		/* align IP header */
+		val |= FEC_RACC_SHIFT16;
 		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+			/* set RX checksum */
 			val |= FEC_RACC_OPTIONS;
 		else
 			val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		prefetch(skb->data - NET_IP_ALIGN);
 		skb_put(skb, pkt_len - 4);
 		data = skb->data;
+
+#if !defined(CONFIG_M5272)
+		if (fep->quirks & FEC_QUIRK_HAS_RACC)
+			data = skb_pull_inline(skb, 2);
+#endif
+
 		if (!is_copybreak && need_swap)
 			swap_buffer(data, pkt_len);

View file

@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
 	nvdimm_map->size = size;
 	kref_init(&nvdimm_map->kref);

-	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
+	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+				&offset, size, dev_name(dev));
 		goto err_request_region;
+	}

 	if (flags)
 		nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
 		kref_get(&nvdimm_map->kref);
 	nvdimm_bus_unlock(dev);

+	if (!nvdimm_map)
+		return NULL;
+
 	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
 		return NULL;

View file

@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
 struct nd_region_data {
 	int ns_count;
 	int ns_active;
-	unsigned int flush_mask;
-	void __iomem *flush_wpq[0][0];
+	unsigned int hints_shift;
+	void __iomem *flush_wpq[0];
 };

+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+		int dimm, int hint)
+{
+	unsigned int num = 1 << ndrd->hints_shift;
+	unsigned int mask = num - 1;
+
+	return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+		int hint, void __iomem *flush)
+{
+	unsigned int num = 1 << ndrd->hints_shift;
+	unsigned int mask = num - 1;
+
+	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
 static inline struct nd_namespace_index *to_namespace_index(
 		struct nvdimm_drvdata *ndd, int i)
 {

View file

@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
 			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");

-	for (i = 0; i < nvdimm->num_flush; i++) {
+	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
 		struct resource *res = &nvdimm->flush_wpq[i];
 		unsigned long pfn = PHYS_PFN(res->start);
 		void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 		if (j < i)
 			flush_page = (void __iomem *) ((unsigned long)
-					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
+					ndrd_get_flush_wpq(ndrd, dimm, j)
+					& PAGE_MASK);
 		else
 			flush_page = devm_nvdimm_ioremap(dev,
-					PHYS_PFN(pfn), PAGE_SIZE);
+					PFN_PHYS(pfn), PAGE_SIZE);
 		if (!flush_page)
 			return -ENXIO;
-		ndrd->flush_wpq[dimm][i] = flush_page
-			+ (res->start & ~PAGE_MASK);
+		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+				+ (res->start & ~PAGE_MASK));
 	}

 	return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
 		return -ENOMEM;
 	dev_set_drvdata(dev, ndrd);

-	ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
+	if (!num_flush)
+		return 0;
+
+	ndrd->hints_shift = ilog2(num_flush);
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
 	 */
 	wmb();
 	for (i = 0; i < nd_region->ndr_mappings; i++)
-		if (ndrd->flush_wpq[i][0])
-			writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+		if (ndrd_get_flush_wpq(ndrd, i, 0))
+			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
 	wmb();
 }
 EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 	for (i = 0; i < nd_region->ndr_mappings; i++)
 		/* flush hints present, flushing required */
-		if (ndrd->flush_wpq[i][0])
+		if (ndrd_get_flush_wpq(ndrd, i, 0))
 			return 1;

 	/*

View file

@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 	queue = &ctrl->queues[idx];
 	queue->ctrl = ctrl;
-	queue->flags = 0;
 	init_completion(&queue->cm_done);

 	if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 		goto out_destroy_cm_id;
 	}

+	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
 	set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);

 	return 0;

View file

@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	else
 		shost->dma_boundary = 0xffffffff;

+	shost->use_blk_mq = scsi_use_blk_mq;
+
 	device_initialize(&shost->shost_gendev);
 	dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
 	shost->shost_gendev.bus = &scsi_bus_type;

View file

@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
 bool scsi_use_blk_mq = false;
 #endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
-EXPORT_SYMBOL_GPL(scsi_use_blk_mq);

 static int __init init_scsi(void)
 {

View file

@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
 extern void scsi_exit_hosts(void);

 /* scsi.c */
+extern bool scsi_use_blk_mq;
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
 #ifdef CONFIG_SCSI_LOGGING

View file

@@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
 	if (ret < 0)
 		return ret;

-	/*
-	 * Use new btrfs_qgroup_reserve_data to reserve precious data space
-	 *
-	 * TODO: Find a good method to avoid reserve data space for NOCOW
-	 * range, but don't impact performance on quota disable case.
-	 */
+	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
 	ret = btrfs_qgroup_reserve_data(inode, start, len);
+	if (ret)
+		btrfs_free_reserved_data_space_noquota(inode, start, len);

 	return ret;
 }

View file

@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
 	int namelen;
 	int ret = 0;

+	if (!S_ISDIR(file_inode(file)->i_mode))
+		return -ENOTDIR;
+
 	ret = mnt_want_write_file(file);
 	if (ret)
 		goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
 	struct btrfs_ioctl_vol_args *vol_args;
 	int ret;

+	if (!S_ISDIR(file_inode(file)->i_mode))
+		return -ENOTDIR;
+
 	vol_args = memdup_user(arg, sizeof(*vol_args));
 	if (IS_ERR(vol_args))
 		return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
 	bool readonly = false;
 	struct btrfs_qgroup_inherit *inherit = NULL;

+	if (!S_ISDIR(file_inode(file)->i_mode))
+		return -ENOTDIR;
+
 	vol_args = memdup_user(arg, sizeof(*vol_args));
 	if (IS_ERR(vol_args))
 		return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 	int ret;
 	int err = 0;

+	if (!S_ISDIR(dir->i_mode))
+		return -ENOTDIR;
+
 	vol_args = memdup_user(arg, sizeof(*vol_args));
 	if (IS_ERR(vol_args))
 		return PTR_ERR(vol_args);

View file

@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 	if (bin_attr->cb_max_size &&
 			*ppos + count > bin_attr->cb_max_size) {
 		len = -EFBIG;
+		goto out;
 	}

 	tbuf = vmalloc(*ppos + count);

View file

@@ -1842,6 +1842,16 @@ out_commit:
 	ocfs2_commit_trans(osb, handle);

 out:
+	/*
+	 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
+	 * even in case of error here like ENOSPC and ENOMEM. So, we need
+	 * to unlock the target page manually to prevent deadlocks when
+	 * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED
+	 * to VM code.
+	 */
+	if (wc->w_target_locked)
+		unlock_page(mmap_page);
+
 	ocfs2_free_write_ctxt(inode, wc);

 	if (data_ac) {

View file

@@ -32,6 +32,7 @@ enum can_mode {
  * CAN common private data
  */
 struct can_priv {
+	struct net_device *dev;
 	struct can_device_stats can_stats;

 	struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
 	u32 ctrlmode_static;	/* static enabled options for driver/hardware */

 	int restart_ms;
-	struct timer_list restart_timer;
+	struct delayed_work restart_work;

 	int (*do_set_bittiming)(struct net_device *dev);
 	int (*do_set_data_bittiming)(struct net_device *dev);

View file

@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif

-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)

View file

@@ -120,5 +120,5 @@ struct mfc_cache {
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
-		   struct rtmsg *rtm, int nowait);
+		   struct rtmsg *rtm, int nowait, u32 portid);
 #endif

View file

@@ -116,7 +116,7 @@ struct mfc6_cache {
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-			   struct rtmsg *rtm, int nowait);
+			   struct rtmsg *rtm, int nowait, u32 portid);

 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);

View file

@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
 		return __get_user(c, end);
 	}

+	(void)c;
 	return 0;
 }

Some files were not shown because too many files have changed in this diff.