Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-05-24 10:39:52 +00:00)
Merge remote-tracking branch 'tip/perf/urgent' into perf/core
To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit ce6c9da111
448 changed files with 4053 additions and 2440 deletions
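As a practical aside, here is a minimal, hedged sketch of how this merge could be inspected locally with stock git commands — assuming a clone of the mirrored tree, and assuming the page's diff is taken against the merge's first parent, as is common for merge views; ce6c9da111 is the abbreviated commit id shown above:

    # Fetch a tree that contains the commit (the mirrored URL above)
    git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
    cd linux

    # The merge commit itself: message, author, and its two parents
    git show --no-patch ce6c9da111

    # Summary comparable to "448 changed files ..." (diff vs. first parent)
    git diff --stat ce6c9da111^1 ce6c9da111

    # Full patch text, of which the hunks below are an excerpt
    git diff ce6c9da111^1 ce6c9da111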
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
 API for programming an FPGA
 ---------------------------
 
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_image_info
 
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-http://dev.gentoo.org/~spock/projects/uvesafb
+
+https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
 Michal Januszewski <spock@gentoo.org>
-Last updated: 2009-03-30
+Last updated: 2017-10-10
 
 Documentation of the uvesafb options is loosely based on vesafb.txt.
 
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
	  1 - Disabled by default, enabled when an ICMP black hole detected
	  2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
	Controls how often to start TCP Packetization-Layer Path MTU
	Discovery reprobe. The default is reprobing every 10 minutes as
	per RFC4821.
 MAINTAINERS | 23
@@ -324,7 +324,6 @@ F:	Documentation/ABI/testing/sysfs-bus-acpi
 F:	Documentation/ABI/testing/configfs-acpi
 F:	drivers/pci/*acpi*
 F:	drivers/pci/*/*acpi*
-F:	drivers/pci/*/*/*acpi*
 F:	tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N:	meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:	Antoine Tenart <antoine.tenart@free-electrons.com>
+M:	Antoine Tenart <antoine.tenart@bootlin.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -2977,6 +2975,7 @@ F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:	Ariel Elior <ariel.elior@cavium.com>
+M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:	everest-linux-l2@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -5470,7 +5469,8 @@ S:	Odd Fixes
 F:	drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:	Stephen Hemminger <stephen@networkplumber.org>
+M:	Roopa Prabhu <roopa@cumulusnetworks.com>
+M:	Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net:Bridge
@@ -8598,7 +8598,6 @@ F:	include/linux/spinlock*.h
 F:	arch/*/include/asm/spinlock*.h
 F:	include/linux/rwlock*.h
 F:	include/linux/mutex*.h
-F:	arch/*/include/asm/mutex*.h
 F:	include/linux/rwsem*.h
 F:	arch/*/include/asm/rwsem.h
 F:	include/linux/seqlock.h
@@ -9658,7 +9657,8 @@ MIPS/LOONGSON2 ARCHITECTURE
 M:	Jiaxun Yang <jiaxun.yang@flygoat.com>
 L:	linux-mips@linux-mips.org
 S:	Maintained
-F:	arch/mips/loongson64/*{2e/2f}*
+F:	arch/mips/loongson64/fuloong-2e/
+F:	arch/mips/loongson64/lemote-2f/
 F:	arch/mips/include/asm/mach-loongson64/
 F:	drivers/*/*loongson2*
 F:	drivers/*/*/*loongson2*
@@ -9865,7 +9865,7 @@ M:	Peter Rosin <peda@axentia.se>
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-class-mux*
 F:	Documentation/devicetree/bindings/mux/
-F:	include/linux/dt-bindings/mux/
+F:	include/dt-bindings/mux/
 F:	include/linux/mux/
 F:	drivers/mux/
 
@@ -10942,7 +10942,7 @@ M:	Willy Tarreau <willy@haproxy.com>
 M:	Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:	Odd Fixes
 F:	Documentation/auxdisplay/lcd-panel-cgram.txt
-F:	drivers/misc/panel.c
+F:	drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11979,7 +11979,7 @@ F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
+M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -11987,7 +11987,6 @@ S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -13063,7 +13062,7 @@ SELINUX SECURITY MODULE
 M:	Paul Moore <paul@paul-moore.com>
 M:	Stephen Smalley <sds@tycho.nsa.gov>
 M:	Eric Paris <eparis@parisplace.org>
-L:	selinux@tycho.nsa.gov (moderated for non-subscribers)
+L:	selinux@vger.kernel.org
 W:	https://selinuxproject.org
 W:	https://github.com/SELinuxProject
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
@@ -15395,7 +15394,7 @@ S:	Maintained
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
 L:	linux-fbdev@vger.kernel.org
-W:	http://dev.gentoo.org/~spock/projects/uvesafb/
+W:	https://github.com/mjanusz/v86d
 S:	Maintained
 F:	Documentation/fb/uvesafb.txt
 F:	drivers/video/fbdev/uvesafb.*
 Makefile | 10
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -483,13 +483,15 @@ endif
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET	:= --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN	:= $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX	:= --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
 KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
 KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
@@ -149,7 +149,7 @@ config ARC_CPU_770
	  Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
	  This core has a bunch of cool new features:
	  -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-	          Shared Address Spaces (for sharing TLB entires in MMU)
+	          Shared Address Spaces (for sharing TLB entries in MMU)
	  -Caches: New Prog Model, Region Flush
	  -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
@@ -6,33 +6,11 @@
 # published by the Free Software Foundation.
 #
 
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
 cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
-cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=archs
+cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=hs38
 
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-$(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-$(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data)	+= -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)	+= -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
 
-LIBGCC	:= $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC	= $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
 KBUILD_CFLAGS_MODULE	+= -mlong-calls -mno-millicode
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
				task_thread_info(current)->thr_ptr;
	}
 
+
+	/*
+	 * setup usermode thread pointer #1:
+	 * when child is picked by scheduler, __switch_to() uses @c_callee to
+	 * populate usermode callee regs: this works (despite being in a kernel
+	 * function) since special return path for child @ret_from_fork()
+	 * ensures those regs are not clobbered all the way to RTIE to usermode
+	 */
+	c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/*
+	 * setup usermode thread pointer #2:
+	 * however for this special use of r25 in kernel, __switch_to() sets
+	 * r25 for kernel needs and only in the final return path is usermode
+	 * r25 setup, from pt_regs->user_r25. So set that up as well
+	 */
+	c_regs->user_r25 = c_callee->r25;
+#endif
+
	return 0;
 }
 
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
	model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
			 <PIN_PA30__NWE_NANDWE>,
			 <PIN_PB2__NRD_NANDOE>;
			bias-pull-up;
+			atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
		};
 
		ale_cle_rdy_cs {
@@ -106,21 +106,23 @@
		global_timer: timer@1e200 {
			compatible = "arm,cortex-a9-global-timer";
			reg = <0x1e200 0x20>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
			clocks = <&axi_clk>;
		};
 
		local_timer: local-timer@1e600 {
			compatible = "arm,cortex-a9-twd-timer";
			reg = <0x1e600 0x20>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_EDGE_RISING)>;
			clocks = <&axi_clk>;
		};
 
		twd_watchdog: watchdog@1e620 {
			compatible = "arm,cortex-a9-twd-wdt";
			reg = <0x1e620 0x20>;
-			interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_HIGH)>;
		};
 
		armpll: armpll {
@@ -158,7 +160,7 @@
		serial0: serial@600 {
			compatible = "brcm,bcm6345-uart";
			reg = <0x600 0x1b>;
-			interrupts = <GIC_SPI 32 0>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&periph_clk>;
			clock-names = "periph";
			status = "disabled";
@@ -167,7 +169,7 @@
		serial1: serial@620 {
			compatible = "brcm,bcm6345-uart";
			reg = <0x620 0x1b>;
-			interrupts = <GIC_SPI 33 0>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&periph_clk>;
			clock-names = "periph";
			status = "disabled";
@@ -180,7 +182,7 @@
			reg = <0x2000 0x600>, <0xf0 0x10>;
			reg-names = "nand", "nand-int-base";
			status = "disabled";
-			interrupts = <GIC_SPI 38 0>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "nand";
		};
 
@@ -123,6 +123,17 @@
	};
 };
 
+&cpu0 {
+	/* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+	operating-points = <
+		/* kHz   uV */
+		166666  850000
+		400000  900000
+		800000  1050000
+		1000000 1200000
+	>;
+};
+
 &esdhc1 {
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_esdhc1>;
@@ -1078,8 +1078,8 @@
				interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
				clocks = <&rcc SPI6_K>;
				resets = <&rcc SPI6_R>;
-				dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-				       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+				dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+				       <&mdma1 35 0x0 0x40002 0x0 0x0>;
				dma-names = "rx", "tx";
				status = "disabled";
			};
@@ -800,8 +800,7 @@
		};
 
		hdmi_phy: hdmi-phy@1ef0000 {
-			compatible = "allwinner,sun8i-r40-hdmi-phy",
-				     "allwinner,sun50i-a64-hdmi-phy";
+			compatible = "allwinner,sun8i-r40-hdmi-phy";
			reg = <0x01ef0000 0x10000>;
			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
				 <&ccu 7>, <&ccu 16>;
@@ -49,6 +49,8 @@
 #define ARM_DISCARD \
		*(.ARM.exidx.exit.text) \
		*(.ARM.extab.exit.text) \
+		*(.ARM.exidx.text.exit) \
+		*(.ARM.extab.text.exit) \
		ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
		ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
		ARM_EXIT_DISCARD(EXIT_TEXT) \
@@ -478,15 +478,15 @@ static const struct coproc_reg cp15_regs[] = {
 
	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},
-	/* ICC_ASGI1R */
-	{ CRm64(12), Op1( 1), is64, access_gic_sgi},
-	/* ICC_SGI0R */
-	{ CRm64(12), Op1( 2), is64, access_gic_sgi},
 
	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },
 
+	/* ICC_ASGI1R */
+	{ CRm64(12), Op1( 1), is64, access_gic_sgi},
+	/* ICC_SGI0R */
+	{ CRm64(12), Op1( 2), is64, access_gic_sgi},
	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
 
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
@@ -413,3 +413,4 @@
 396	common	pkey_free		sys_pkey_free
 397	common	statx			sys_statx
 398	common	rseq			sys_rseq
+399	common	io_pgetevents		sys_io_pgetevents
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
	return 0;
 }
 
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
 static void armv8pmu_reset(void *info)
 {
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
	cpu_pmu->stop			= armv8pmu_stop,
	cpu_pmu->reset			= armv8pmu_reset,
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
+	cpu_pmu->filter_match		= armv8pmu_filter_match;
 
	return 0;
 }
@@ -64,6 +64,9 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
 
+static int num_standard_resources;
+static struct resource *standard_resources;
+
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
 {
	struct memblock_region *region;
	struct resource *res;
+	unsigned long i = 0;
 
	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);
 
+	num_standard_resources = memblock.memory.cnt;
+	standard_resources = alloc_bootmem_low(num_standard_resources *
+					       sizeof(*standard_resources));
+
	for_each_memblock(memory, region) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
 
 static int __init reserve_memblock_reserved_regions(void)
 {
-	phys_addr_t start, end, roundup_end = 0;
-	struct resource *mem, *res;
-	u64 i;
+	u64 i, j;
 
-	for_each_reserved_mem_region(i, &start, &end) {
-		if (end <= roundup_end)
-			continue; /* done already */
-
-		start = __pfn_to_phys(PFN_DOWN(start));
-		end = __pfn_to_phys(PFN_UP(end)) - 1;
-		roundup_end = end;
-
-		res = kzalloc(sizeof(*res), GFP_ATOMIC);
-		if (WARN_ON(!res))
-			return -ENOMEM;
-		res->start = start;
-		res->end = end;
-		res->name  = "reserved";
-		res->flags = IORESOURCE_MEM;
-
-		mem = request_resource_conflict(&iomem_resource, res);
-		/*
-		 * We expected memblock_reserve() regions to conflict with
-		 * memory created by request_standard_resources().
-		 */
-		if (WARN_ON_ONCE(!mem))
+	for (i = 0; i < num_standard_resources; ++i) {
+		struct resource *mem = &standard_resources[i];
+		phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;
-		kfree(res);
 
-		reserve_region_with_split(mem, start, end, "reserved");
+		for_each_reserved_mem_region(j, &r_start, &r_end) {
+			resource_size_t start, end;
+
+			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+			if (start > mem->end || end < mem->start)
+				continue;
+
+			reserve_region_with_split(mem, start, end, "reserved");
+		}
	}
 
	return 0;
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
	/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	}
 
	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
			break;
		default:
			err = -EINVAL;
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty bit for any page in the set, so check
-		 * them all. All hugetlb entries are already young.
+		 * the dirty or accessed bit for any page in the set,
+		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
	}
 
	if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+	int i;
+
+	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+		return 1;
+
+	for (i = 0; i < ncontig; i++) {
+		pte_t orig_pte = huge_ptep_get(ptep + i);
+
+		if (pte_dirty(pte) != pte_dirty(orig_pte))
+			return 1;
+
+		if (pte_young(pte) != pte_young(orig_pte))
+			return 1;
+	}
+
+	return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
 {
-	int ncontig, i, changed = 0;
+	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;
 
-	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-	if (!pte_same(orig_pte, pte))
-		changed = 1;
+	if (!__cont_access_flags_changed(ptep, pte, ncontig))
+		return 0;
 
-	/* Make sure we don't lose the dirty state */
+	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+
+	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);
 
+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-	return changed;
+	return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <linux/sizes.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
 
 #endif
 
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP	((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP	mips_stack_top()
 
 /*
  * This decides where the kernel will search for a free chunk of vm
@@ -32,6 +32,7 @@
 #include <linux/nmi.h>
 #include <linux/cpu.h>
 
+#include <asm/abi.h>
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
@@ -39,6 +40,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/irq.h>
+#include <asm/mips-cps.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
	return pc;
 }
 
+unsigned long mips_stack_top(void)
+{
+	unsigned long top = TASK_SIZE & PAGE_MASK;
+
+	/* One page for branch delay slot "emulation" */
+	top -= PAGE_SIZE;
+
+	/* Space for the VDSO, data page & GIC user page */
+	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+	top -= PAGE_SIZE;
+	top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+	/* Space for cache colour alignment */
+	if (cpu_has_dc_aliases)
+		top -= shm_align_mask + 1;
+
+	/* Space to randomize the VDSO base */
+	if (current->flags & PF_RANDOMIZE)
+		top -= VDSO_RANDOMIZE_SIZE;
+
+	return top;
+}
+
 /*
  * Don't forget that the stack pointer must be aligned on a 8 bytes
  * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
	struct memblock_region *reg;
	extern void plat_mem_setup(void);
 
+	/*
+	 * Initialize boot_command_line to an innocuous but non-empty string in
+	 * order to prevent early_init_dt_scan_chosen() from copying
+	 * CONFIG_CMDLINE into it without our knowledge. We handle
+	 * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+	 * content because repeating arguments can be problematic.
+	 */
+	strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+	/* call board setup routine */
+	plat_mem_setup();
+
+	/*
+	 * Make sure all kernel memory is in the maps. The "UP" and
+	 * "DOWN" are opposite for initdata since if it crosses over
+	 * into another memory section you don't want that to be
+	 * freed when the initdata is freed.
+	 */
+	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+			 BOOT_MEM_RAM);
+	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+			 BOOT_MEM_INIT_RAM);
+
+	pr_info("Determined physical RAM map:\n");
+	print_memory_map();
+
 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
	}
 #endif
 #endif
-
-	/* call board setup routine */
-	plat_mem_setup();
-
-	/*
-	 * Make sure all kernel memory is in the maps. The "UP" and
-	 * "DOWN" are opposite for initdata since if it crosses over
-	 * into another memory section you don't want that to be
-	 * freed when the initdata is freed.
-	 */
-	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
-			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
-			 BOOT_MEM_RAM);
-	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
-			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
-			 BOOT_MEM_INIT_RAM);
-
-	pr_info("Determined physical RAM map:\n");
-	print_memory_map();
-
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 
	*cmdline_p = command_line;
@@ -15,6 +15,7 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
	}
 }
 
+static unsigned long vdso_base(void)
+{
+	unsigned long base;
+
+	/* Skip the delay slot emulation page */
+	base = STACK_TOP + PAGE_SIZE;
+
+	if (current->flags & PF_RANDOMIZE) {
+		base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+		base = PAGE_ALIGN(base);
+	}
+
+	return base;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
	struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;
 
-	base = get_unmapped_area(NULL, 0, size, 0, 0);
+	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
@@ -280,9 +280,11 @@
	 * unset_bytes = end_addr - current_addr + 1
	 *      a2     =    t1    -     a0      + 1
	 */
+	.set		reorder
	PTR_SUBU	a2, t1, a0
+	PTR_ADDIU	a2, 1
	jr		ra
-	 PTR_ADDIU	a2, 1
+	.set		noreorder
 
	.endm
 
@@ -114,7 +114,7 @@
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 /*
  * user access blocked by key
  */
@@ -132,7 +132,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 
 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
 
	pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
 
+	/*
+	 * Make sure the NIP points at userspace, not kernel text/data or
+	 * elsewhere.
+	 */
+	if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+			current->comm, current->pid);
+		return;
+	}
+
	pr_info("%s[%d]: code: ", current->comm, current->pid);
 
	for (i = 0; i < instructions_to_print; i++) {
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	/*
+	 * If the PTE disappeared temporarily due to a THP
+	 * collapse, just return and let the guest try again.
+	 */
+	if (!ptep) {
+		local_irq_enable();
+		if (page)
+			put_page(page);
+		return RESUME_GUEST;
+	}
	pte = *ptep;
	local_irq_enable();
 
@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
	int err;
 
-	/* Make sure we aren't patching a freed init section */
-	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
-		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
-		return 0;
-	}
-
	__put_user_size(instr, patch_addr, 4, err);
	if (err)
		return err;
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
	return 0;
 }
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
	int err;
	unsigned int *patch_addr = NULL;
@@ -188,12 +182,22 @@ out:
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
	return raw_patch_instruction(addr, instr);
 }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+	return do_patch_instruction(addr, instr);
+}
+
 NOKPROBE_SYMBOL(patch_instruction);
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
	 * Need to ensure that NODE_DATA is initialized for a node from
	 * available memory (see memblock_alloc_try_nid). If unable to
	 * init the node, then default to nearest node that has memory
-	 * installed.
+	 * installed. Skip onlining a node if the subsystems are not
+	 * yet initialized.
	 */
-	if (try_online_node(new_nid))
+	if (!topology_inited || try_online_node(new_nid))
		new_nid = first_online_node;
 #else
	/*
@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
	BUG_ON(mem_size == 0);
 
	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+	max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
 void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
@@ -10,7 +10,7 @@
 
 static void sclp_early_write(struct console *con, const char *s, unsigned int len)
 {
-	__sclp_early_printk(s, len);
+	__sclp_early_printk(s, len, 0);
 }
 
 static struct console sclp_early_console = {
@@ -198,12 +198,10 @@ pgm_check_entry:
 
	/* Suspend CPU not available -> panic */
	larl	%r15,init_thread_union
-	ahi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+	aghi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+	aghi	%r15,-STACK_FRAME_OVERHEAD
	larl	%r2,.Lpanic_string
-	lghi	%r1,0
-	sam31
-	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
-	brasl	%r14,sclp_early_printk
+	brasl	%r14,sclp_early_printk_force
	larl	%r3,.Ldisabled_wait_31
	lpsw	0(%r3)
 4:
@@ -28,7 +28,7 @@ typedef struct {
	unsigned short	sock_id;	/* physical package */
	unsigned short	core_id;
	unsigned short	max_cache_id;	/* groupings of highest shared cache */
-	unsigned short	proc_id;	/* strand (aka HW thread) id */
+	signed short	proc_id;	/* strand (aka HW thread) id */
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
@@ -427,8 +427,9 @@
 #define __NR_preadv2		358
 #define __NR_pwritev2		359
 #define __NR_statx		360
+#define __NR_io_pgetevents	361
 
-#define NR_syscalls		361
+#define NR_syscalls		362
 
 /* Bitmask values returned from kern_features system call. */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			linux_regs->pc = addr;
			linux_regs->npc = addr + 4;
		}
-		/* fallthru */
+		/* fall through */
 
	case 'D':
	case 'k':
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			linux_regs->tpc = addr;
			linux_regs->tnpc = addr + 4;
		}
-		/* fallthru */
+		/* fall through */
 
	case 'D':
	case 'k':
@@ -24,6 +24,7 @@
 #include <asm/cpudata.h>
 #include <linux/uaccess.h>
 #include <linux/atomic.h>
+#include <linux/sched/clock.h>
 #include <asm/nmi.h>
 #include <asm/pcr.h>
 #include <asm/cacheflush.h>
@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
+			if (cp->hw.state & PERF_HES_STOPPED)
+				cp->hw.state |= PERF_HES_ARCH;
		}
	}
 }
@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
 
		enc = perf_event_get_enc(cpuc->events[i]);
		cpuc->pcr[0] &= ~mask_for_index(idx);
-		if (hwc->state & PERF_HES_STOPPED)
+		if (hwc->state & PERF_HES_ARCH) {
			cpuc->pcr[0] |= nop_for_index(idx);
-		else
+		} else {
			cpuc->pcr[0] |= event_encoding(enc, idx);
+			hwc->state = 0;
+		}
	}
 out:
	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
 
		cpuc->current_idx[i] = idx;
 
+		if (cp->hw.state & PERF_HES_ARCH)
+			continue;
+
		sparc_pmu_start(cp, PERF_EF_RELOAD);
	}
 out:
@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
	event->hw.state = 0;
 
	sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+	perf_event_update_userpage(event);
 }
 
 static void sparc_pmu_stop(struct perf_event *event, int flags)
@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;
 
-	event->hw.state = PERF_HES_UPTODATE;
+	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(ef_flags & PERF_EF_START))
-		event->hw.state |= PERF_HES_STOPPED;
+		event->hw.state |= PERF_HES_ARCH;
 
	/*
	 * If group events scheduling transaction was started,
@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
|
||||||
struct perf_sample_data data;
|
struct perf_sample_data data;
|
||||||
struct cpu_hw_events *cpuc;
|
struct cpu_hw_events *cpuc;
|
||||||
struct pt_regs *regs;
|
struct pt_regs *regs;
|
||||||
|
u64 finish_clock;
|
||||||
|
u64 start_clock;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!atomic_read(&active_events))
|
if (!atomic_read(&active_events))
|
||||||
|
@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
|
||||||
return NOTIFY_DONE;
|
return NOTIFY_DONE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
start_clock = sched_clock();
|
||||||
|
|
||||||
regs = args->regs;
|
regs = args->regs;
|
||||||
|
|
||||||
cpuc = this_cpu_ptr(&cpu_hw_events);
|
cpuc = this_cpu_ptr(&cpu_hw_events);
|
||||||
|
@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
|
||||||
sparc_pmu_stop(event, 0);
|
sparc_pmu_stop(event, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
finish_clock = sched_clock();
|
||||||
|
|
||||||
|
perf_sample_event_took(finish_clock - start_clock);
|
||||||
|
|
||||||
return NOTIFY_STOP;
|
return NOTIFY_STOP;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
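
Note: the perf_event.c hunks above do two related things. Events are now created stopped (PERF_HES_STOPPED), with PERF_HES_ARCH repurposed as the arch-private "leave it stopped" marker that calculate_single_pcr()/calculate_multiple_pcrs() honor, and the NMI handler now reports its own runtime so the perf core can throttle sampling. A minimal sketch of that feedback pattern, assuming only the timing logic is of interest:

	#include <linux/notifier.h>
	#include <linux/perf_event.h>
	#include <linux/sched/clock.h>
	#include <linux/types.h>

	static int pmu_nmi_handler_sketch(void)
	{
		u64 start_clock, finish_clock;

		start_clock = sched_clock();

		/* ... read counters and push samples ... */

		finish_clock = sched_clock();
		/* Report what this NMI cost; the core lowers the
		 * maximum sampling rate if handlers run too long. */
		perf_sample_event_took(finish_clock - start_clock);

		return NOTIFY_STOP;
	}
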
@@ -84,8 +84,9 @@ __handle_signal:
 		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 		sethi			%hi(0xf << 20), %l4
 		and			%l1, %l4, %l4
+		andn			%l1, %l4, %l1
 		ba,pt			%xcc, __handle_preemption_continue
-		 andn			%l1, %l4, %l1
+		 srl			%l4, 20, %l4

 /* When returning from a NMI (%pil==15) interrupt we want to
  * avoid running softirqs, doing IRQ tracing, preempting, etc.
@@ -90,4 +90,4 @@ sys_call_table:
 /*345*/	.long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 /*355*/	.long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/	.long sys_statx
+/*360*/	.long sys_statx, sys_io_pgetevents
@@ -91,7 +91,7 @@ sys_call_table32:
 	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 	.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
-/*360*/	.word sys_statx
+/*360*/	.word sys_statx, compat_sys_io_pgetevents

 #endif /* CONFIG_COMPAT */

@@ -173,4 +173,4 @@ sys_call_table:
 	.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/	.word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
 	.word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/	.word sys_statx
+/*360*/	.word sys_statx, sys_io_pgetevents
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
 		struct vio_dring_register pkt;
 		char all[sizeof(struct vio_dring_register) +
 			 (sizeof(struct ldc_trans_cookie) *
-			  dr->ncookies)];
+			  VIO_MAX_RING_COOKIES)];
 	} u;
+	size_t bytes = sizeof(struct vio_dring_register) +
+		       (sizeof(struct ldc_trans_cookie) *
+			dr->ncookies);
 	int i;

-	memset(&u, 0, sizeof(u));
+	if (WARN_ON(bytes > sizeof(u)))
+		return -EINVAL;
+
+	memset(&u, 0, bytes);
 	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
 	u.pkt.dring_ident = 0;
 	u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
 			(unsigned long long) u.pkt.cookies[i].cookie_size);
 	}

-	return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+	return send_ctrl(vio, &u.pkt.tag, bytes);
 }

 static int send_rdx(struct vio_driver_state *vio)
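
Note: the send_dreg() change is one of the kernel-wide VLA removals. The on-stack union used to be sized by the runtime dr->ncookies, which defeats stack-depth analysis; the fix sizes it for the compile-time worst case and bounds-checks the runtime payload. The pattern, sketched with illustrative names and a made-up maximum:

	#include <linux/bug.h>
	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	#define MAX_COOKIES	24	/* illustrative compile-time worst case */

	struct cookie { u64 addr; u64 size; };
	struct reg_pkt { u32 hdr; struct cookie cookies[]; };

	static int send_reg_sketch(unsigned int ncookies)
	{
		union {
			struct reg_pkt pkt;
			char all[sizeof(struct reg_pkt) +
				 sizeof(struct cookie) * MAX_COOKIES];
		} u;
		size_t bytes = sizeof(struct reg_pkt) +
			       sizeof(struct cookie) * ncookies;

		if (WARN_ON(bytes > sizeof(u)))	/* runtime count beyond the bound */
			return -EINVAL;

		memset(&u, 0, bytes);	/* clear only what will be transmitted */
		/* ... fill u.pkt and send the first 'bytes' bytes ... */
		return 0;
	}
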
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
 targets += $(vdso_img_cfiles)
 targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)

-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C

 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
 			-Wl,--no-undefined \
 			-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
 			$(DISABLE_LTO)

-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
 	$(call if_changed,vdso)

 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 hostprogs-y += vdso2c

 quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-	$(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+      cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@

 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 	$(call if_changed,vdso2c)
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif

 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)

@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

 $(obj)/vdso32.so.dbg: FORCE \
@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }

@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;

-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }

@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;

-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;

-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
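
Note: the four vclock_gettime.c hunks above fix undeclared side effects in the vDSO syscall fallbacks. On x86-64 the SYSCALL instruction itself overwrites %rcx (return RIP) and %r11 (saved RFLAGS), and the kernel writes the result through the pointer argument, so the asm must name the written memory as an output and list those clobbers; otherwise the compiler may cache *ts or reuse rcx/r11 across the statement. A user-space analogue of the corrected 64-bit form (a sketch, not the kernel's code):

	#include <time.h>
	#include <sys/syscall.h>

	static long clock_gettime_fallback(clockid_t clock, struct timespec *ts)
	{
		long ret;

		/* "=m" (*ts): the kernel stores through ts; rcx/r11 are
		 * destroyed by SYSCALL itself, not by the kernel. */
		asm volatile ("syscall"
			      : "=a" (ret), "=m" (*ts)
			      : "0" (SYS_clock_gettime), "D" (clock), "S" (ts)
			      : "memory", "rcx", "r11");
		return ret;
	}

The 32-bit variants additionally switch the positional "%2" operand to the named [clock]/[tv] form, so that adding the new "=m" outputs cannot silently renumber the remaining operands.
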
@@ -124,7 +124,7 @@
  */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
-			 _PAGE_SOFT_DIRTY)
+			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
 #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)

 /*
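
Note: _PAGE_CHG_MASK names the PTE bits that a protection change must carry over; with _PAGE_DEVMAP missing from it, an mprotect() on a device-DAX mapping silently dropped the devmap marker. Roughly how the mask is applied, simplified from the real pte_modify() (sketch only):

	static inline pte_t pte_modify_sketch(pte_t pte, pgprot_t newprot)
	{
		pteval_t val = pte_val(pte) & _PAGE_CHG_MASK;	/* bits to keep */

		val |= pgprot_val(newprot) & ~(pteval_t)_PAGE_CHG_MASK;
		return __pte(val);
	}
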
@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;

 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>

 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+	return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else	/* X86_UV */

 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)	{ return 0; }
 static inline int is_uv_system(void)	{ return 0; }
 static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
+	if (c->x86 == 6) {
 		/* Duron Rev A0 */
 		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
 			   struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive);
+			   unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-				  u32 cbm);
+				  unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
@@ -789,25 +789,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *cbm_b;
 	unsigned int cbm_len;
+	unsigned long cbm_b;

 	if (d->plr) {
 		cbm_len = d->plr->r->cache.cbm_len;
-		cbm_b = (unsigned long *)&d->plr->cbm;
-		if (bitmap_intersects(cbm, cbm_b, cbm_len))
+		cbm_b = d->plr->cbm;
+		if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
 			return true;
 	}
 	return false;
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-			   u32 _cbm, int closid, bool exclusive)
+			   unsigned long cbm, int closid, bool exclusive)
 {
-	unsigned long *cbm = (unsigned long *)&_cbm;
-	unsigned long *ctrl_b;
 	enum rdtgrp_mode mode;
+	unsigned long ctrl_b;
 	u32 *ctrl;
 	int i;

 	/* Check for any overlap with regions used by hardware directly */
 	if (!exclusive) {
-		if (bitmap_intersects(cbm,
-				      (unsigned long *)&r->cache.shareable_bits,
-				      r->cache.cbm_len))
+		ctrl_b = r->cache.shareable_bits;
+		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
 			return true;
 	}

 	/* Check for overlap with other resource groups */
 	ctrl = d->ctrl_val;
 	for (i = 0; i < closids_supported(); i++, ctrl++) {
-		ctrl_b = (unsigned long *)ctrl;
+		ctrl_b = *ctrl;
 		mode = rdtgroup_mode_by_closid(i);
 		if (closid_allocated(i) && i != closid &&
 		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-			if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
 				if (exclusive) {
 					if (mode == RDT_MODE_EXCLUSIVE)
 						return true;
@@ -1138,15 +1139,18 @@ out:
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used to make the
+ * bitmap functions work correctly.
 */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-				  struct rdt_domain *d, u32 cbm)
+				  struct rdt_domain *d, unsigned long cbm)
 {
 	struct cpu_cacheinfo *ci;
 	unsigned int size = 0;
 	int num_b, i;

-	num_b = bitmap_weight((unsigned long *) &cbm, r->cache.cbm_len);
+	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
 	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
 	for (i = 0; i < ci->num_leaves; i++) {
 		if (ci->info_list[i].level == r->cache_level) {
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 	u32 used_b = 0, unused_b = 0;
 	u32 closid = rdtgrp->closid;
 	struct rdt_resource *r;
+	unsigned long tmp_cbm;
 	enum rdtgrp_mode mode;
 	struct rdt_domain *d;
 	int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 			 * modify the CBM based on system availability.
 			 */
 			cbm_ensure_valid(&d->new_ctrl, r);
-			if (bitmap_weight((unsigned long *) &d->new_ctrl,
-					  r->cache.cbm_len) <
-			    r->cache.min_cbm_bits) {
+			/*
+			 * Assign the u32 CBM to an unsigned long to ensure
+			 * that bitmap_weight() does not access out-of-bound
+			 * memory.
+			 */
+			tmp_cbm = d->new_ctrl;
+			if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+			    r->cache.min_cbm_bits) {
 				rdt_last_cmd_printf("no space on %s:%d\n",
 						    r->name, d->id);
 				return -ENOSPC;
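
Note: all of the intel_rdt changes above are one fix repeated. The bitmap helpers (bitmap_weight(), bitmap_intersects(), ...) dereference whole unsigned longs, so on 64-bit casting the address of a u32 to unsigned long * makes them read four bytes past the variable. Promoting the value into a real unsigned long keeps the access in bounds, which is exactly what the new tmp_cbm/ctrl_b locals do. The safe pattern in isolation (sketch):

	#include <linux/bitmap.h>

	static unsigned int cbm_weight(u32 cbm_u32, unsigned int cbm_len)
	{
		/* Full word on the stack; the u32 is zero-extended, so
		 * the extra bits are zero and cannot change the result. */
		unsigned long tmp = cbm_u32;

		return bitmap_weight(&tmp, cbm_len);
	}
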
@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>

 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return;
+	/* Don't change UV TSC multi-chassis synchronization */
+	if (is_early_uv_system())
+		return;
 	if (!determine_cpu_tsc_frequencies(true))
 		return;
 	loops_per_jiffy = get_loops_per_jiffy();
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)

 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }

 static int is_cpuid_PSE36(void)
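
Note: background for the mmu.c hunks. When KVM usurps the top bits of the physical address space for the L1TF mitigation, the GFN of an MMIO SPTE is stored split: the colliding high bits are shifted left past the mask on encode and shifted back on decode, and the new shadow_nonpresent_or_rsvd_lower_gfn_mask names the bits that stay in place. A toy round trip with illustrative parameters (not KVM's code):

	/* mask_len high GFN bits are parked above the split point */
	static u64 gfn_encode(u64 gpa, u64 lower_mask, u64 high_mask,
			      unsigned int mask_len)
	{
		return (gpa & lower_mask) | ((gpa & high_mask) << mask_len);
	}

	static u64 gfn_decode(u64 spte, u64 lower_mask, u64 high_mask,
			      unsigned int mask_len)
	{
		return (spte & lower_mask) | ((spte >> mask_len) & high_mask);
	}

The old decoder derived the low part by masking off generation and MMIO bits instead of using a dedicated lower-GFN mask, which corrupted the GFN once the split applied.
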
@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)

 static inline bool svm_sev_enabled(void)
 {
-	return max_sev_asid;
+	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
 }

 static inline bool sev_guest(struct kvm *kvm)
 {
+#ifdef CONFIG_KVM_AMD_SEV
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

 	return sev->active;
+#else
+	return false;
+#endif
 }

 static inline int sev_get_asid(struct kvm *kvm)
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);

 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
-#define MSR_BITMAP_MODE_LM		4

 #define KVM_VMX_TSC_MULTIPLIER_MAX	0xffffffffffffffffULL

@@ -857,6 +856,7 @@ struct nested_vmx {

 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
 	u64 vmcs01_debugctl;
+	u64 vmcs01_guest_bndcfgs;

 	u16 vpid02;
 	u16 last_vpid;
@@ -1572,8 +1572,12 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
 		goto out;
 	}

+	/*
+	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
+	 * base of EPT PML4 table, strip off EPT configuration information.
+	 */
 	ret = hyperv_flush_guest_mapping(
-			to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+			to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);

 out:
 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
@@ -2899,8 +2903,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
 	}

-	if (is_long_mode(&vmx->vcpu))
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
 	savesegment(fs, fs_sel);
 	savesegment(gs, gs_sel);
@@ -2951,8 +2954,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 	vmx->loaded_cpu_state = NULL;

 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu))
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
 	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
 		kvm_load_ldt(host_state->ldt_sel);
@@ -2980,24 +2982,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			rdmsrl(MSR_KERNEL_GS_BASE,
-			       vmx->msr_guest_kernel_gs_base);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	preempt_enable();
 	return vmx->msr_guest_kernel_gs_base;
 }

 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-	if (is_long_mode(&vmx->vcpu)) {
-		preempt_disable();
-		if (vmx->loaded_cpu_state)
-			wrmsrl(MSR_KERNEL_GS_BASE, data);
-		preempt_enable();
-	}
+	preempt_disable();
+	if (vmx->loaded_cpu_state)
+		wrmsrl(MSR_KERNEL_GS_BASE, data);
+	preempt_enable();
 	vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -3533,9 +3530,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

-	if (kvm_mpx_supported())
-		msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
 	/* We support free control of debug control saving. */
 	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

@@ -3552,8 +3546,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		VM_ENTRY_LOAD_IA32_PAT;
 	msrs->entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (kvm_mpx_supported())
-		msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

 	/* We support free control of debug control loading. */
 	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3601,12 +3593,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high);
 	msrs->secondary_ctls_low = 0;
 	msrs->secondary_ctls_high &=
-		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 		SECONDARY_EXEC_WBINVD_EXITING;

 	/*
 	 * We can emulate "VMCS shadowing," even if the hardware
 	 * doesn't support it.
@@ -3663,6 +3655,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->secondary_ctls_high |=
 			SECONDARY_EXEC_UNRESTRICTED_GUEST;

+	if (flexpriority_enabled)
+		msrs->secondary_ctls_high |=
+			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		msrs->misc_low,
@@ -5073,19 +5069,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!msr)
 		return;

-	/*
-	 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-	 * 64-bit mode as a 64-bit kernel may frequently access the
-	 * MSR.  This means we need to manually save/restore the MSR
-	 * when switching between guest and host state, but only if
-	 * the guest is in 64-bit mode.  Sync our cached value if the
-	 * guest is transitioning to 32-bit mode and the CPU contains
-	 * guest state, i.e. the cache is stale.
-	 */
-#ifdef CONFIG_X86_64
-	if (!(efer & EFER_LMA))
-		(void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
 	vcpu->arch.efer = efer;
 	if (efer & EFER_LMA) {
 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -6078,9 +6061,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
 			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
 	}

-	if (is_long_mode(vcpu))
-		mode |= MSR_BITMAP_MODE_LM;
-
 	return mode;
 }

@@ -6121,9 +6101,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
 	if (!changed)
 		return;

-	vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-				  !(mode & MSR_BITMAP_MODE_LM));
-
 	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
 		vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);

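
Note: the KERNEL_GS_BASE hunks above stop conditioning the guest MSR save/restore on is_long_mode(), which is what lets the MSR_BITMAP_MODE_LM bitmap mode and the vmx_set_efer() resync disappear: the cached value is simply kept valid at all times. The remaining subtlety is the preemption window, since the cache is stale while the guest's value is live in hardware. Sketch with an illustrative structure (not KVM's types):

	#include <linux/preempt.h>
	#include <linux/types.h>
	#include <asm/msr.h>

	struct vcpu_sketch {
		void *loaded_cpu_state;	/* non-NULL: guest MSRs live in HW */
		u64 cached_kernel_gs_base;
	};

	static u64 read_guest_kernel_gs_base(struct vcpu_sketch *v)
	{
		/* No preemption between "is it loaded?" and the rdmsrl(),
		 * so we cannot read another task's MSR image. */
		preempt_disable();
		if (v->loaded_cpu_state)
			rdmsrl(MSR_KERNEL_GS_BASE, v->cached_kernel_gs_base);
		preempt_enable();
		return v->cached_kernel_gs_base;
	}
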
@@ -6189,6 +6166,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
 	nested_mark_vmcs12_pages_dirty(vcpu);
 }

+static u8 vmx_get_rvi(void)
+{
+	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6201,7 +6183,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	    WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
 		return false;

-	rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+	rvi = vmx_get_rvi();

 	vapic_page = kmap(vmx->nested.virtual_apic_page);
 	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
@@ -10245,15 +10227,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
 	if (!lapic_in_kernel(vcpu))
 		return;

+	if (!flexpriority_enabled &&
+	    !cpu_has_vmx_virtualize_x2apic_mode())
+		return;
+
 	/* Postpone execution until vmcs01 is the current VMCS. */
 	if (is_guest_mode(vcpu)) {
 		to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
 		return;
 	}

-	if (!cpu_need_tpr_shadow(vcpu))
-		return;
-
 	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
 	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10375,6 +10358,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	return max_irr;
 }

+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+	u8 rvi = vmx_get_rvi();
+	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+	return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
@@ -11264,6 +11255,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }

+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (kvm_mpx_supported()) {
+		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+		if (mpx_enabled) {
+			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+		} else {
+			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+		}
+	}
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11280,8 +11288,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
 			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

-	if (nested_vmx_allowed(vcpu))
+	if (nested_vmx_allowed(vcpu)) {
 		nested_vmx_cr_fixed1_bits_update(vcpu);
+		nested_vmx_entry_exit_ctls_update(vcpu);
+	}
 }

 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -12049,8 +12059,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)

 	set_cr4_guest_host_mask(vmx);

-	if (vmx_mpx_supported())
-		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+	if (kvm_mpx_supported()) {
+		if (vmx->nested.nested_run_pending &&
+		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+		else
+			vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+	}

 	if (enable_vpid) {
 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12595,15 +12610,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	bool from_vmentry = !!exit_qual;
 	u32 dummy_exit_qual;
-	u32 vmcs01_cpu_exec_ctrl;
+	bool evaluate_pending_interrupts;
 	int r = 0;

-	vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

 	enter_guest_mode(vcpu);

 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+	if (kvm_mpx_supported() &&
+	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 	vmx_segment_cache_clear(vmx);
@@ -12643,16 +12664,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 	 * to L1 or delivered directly to L2 (e.g. In case L1 don't
 	 * intercept EXTERNAL_INTERRUPT).
 	 *
-	 * Usually this would be handled by L0 requesting a
-	 * IRQ/NMI window by setting VMCS accordingly. However,
-	 * this setting was done on VMCS01 and now VMCS02 is active
-	 * instead. Thus, we force L0 to perform pending event
-	 * evaluation by requesting a KVM_REQ_EVENT.
+	 * Usually this would be handled by the processor noticing an
+	 * IRQ/NMI window request, or checking RVI during evaluation of
+	 * pending virtual interrupts.  However, this setting was done
+	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
 	 */
-	if (vmcs01_cpu_exec_ctrl &
-	    (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+	if (unlikely(evaluate_pending_interrupts))
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-	}

 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
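
Note: vmx_get_rvi() and vmx_has_apicv_interrupt() above exist so that nested vmentry can tell whether APICv has a deliverable interrupt pending. Delivery happens only when the priority class (bits 7:4 of the vector) of the Requesting Virtual Interrupt exceeds the class in the virtual PPR; the comparison in isolation (sketch):

	/* Equal priority classes do not deliver; only a strictly
	 * higher class in RVI beats the processor priority. */
	static bool apicv_interrupt_deliverable(u8 rvi, u8 vppr)
	{
		return (rvi & 0xf0) > (vppr & 0xf0);
	}

If that check, or a pending IRQ/NMI window request in vmcs01, is true at nested entry, the code requests KVM_REQ_EVENT, because vmcs02 is loaded and the vmcs01 state would otherwise never be re-evaluated.
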
@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
-			if (!kvm_x86_ops->mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		case MSR_TSC_AUX:
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)

 #define UNSHARED_PTRS_PER_PGD				\
 	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD			\
+	max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)


 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
  * and initialize the kernel pmds here.
  */
 #define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD

 /*
  * We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_USER_PMDS	 (static_cpu_has(X86_FEATURE_PTI) ? \
 					KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)

 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS	0
+#define MAX_PREALLOCATED_PMDS	0
 #define PREALLOCATED_USER_PMDS	 0
+#define MAX_PREALLOCATED_USER_PMDS 0
 #endif	/* CONFIG_X86_PAE */

 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
-	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
-	pmd_t *pmds[PREALLOCATED_PMDS];
+	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+	pmd_t *pmds[MAX_PREALLOCATED_PMDS];

 	pgd = _pgd_alloc();

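
Note: the pgtable.c change is the same VLA-removal pattern as the sparc viohs one. PREALLOCATED_PMDS depends on runtime conditions (SHARED_KERNEL_PMD, static_cpu_has()), so using it as an array length made pgd_alloc()'s arrays variable-length; the MAX_* twins are constant upper bounds (the kernel's max_t() stays a constant expression when both arguments are constant), and the runtime macro still limits how many slots are actually used. Sketch with made-up names:

	#include <linux/kernel.h>

	extern bool runtime_cond(void);	/* hypothetical */

	#define NR_SLOTS	(runtime_cond() ? 4 : 16)	/* runtime */
	#define MAX_NR_SLOTS	max_t(size_t, 4, 16)		/* constant bound */

	static void demo(void)
	{
		void *slots[MAX_NR_SLOTS];	/* fixed size, -Wvla clean */
		int i;

		for (i = 0; i < NR_SLOTS; i++)	/* only NR_SLOTS entries used */
			slots[i] = NULL;
	}
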
@@ -310,6 +310,7 @@ static void scale_up(struct rq_wb *rwb)
 	rq_depth_scale_up(&rwb->rq_depth);
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
+	rwb_wake_all(rwb);
 	rwb_trace_step(rwb, "scale up");
 }

@@ -318,7 +319,6 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
 	calc_wb_limits(rwb);
 	rwb->unknown_cnt = 0;
-	rwb_wake_all(rwb);
 	rwb_trace_step(rwb, "scale down");
 }

@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
 	}

 	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
-		list_add(&tmp->list, &fwc->head);
+	if (tmp) {
+		INIT_LIST_HEAD(&tmp->list);
+		if (!(opt_flags & FW_OPT_NOCACHE))
+			list_add(&tmp->list, &fwc->head);
+	}
 	spin_unlock(&fwc->lock);

 	*fw_priv = tmp;
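
Note: the firmware_loader fix initializes the list head even when the entry is not cached. list_del() on a node whose pointers were never set chases uninitialized memory, while list_del() on an initialized, never-added node only writes to itself, so an unconditional INIT_LIST_HEAD() makes a later unconditional deletion safe. The pattern (sketch):

	#include <linux/list.h>
	#include <linux/types.h>

	struct item {
		struct list_head list;
	};

	static void attach(struct item *it, struct list_head *head, bool cache)
	{
		INIT_LIST_HEAD(&it->list);	/* safe to list_del() later */
		if (cache)
			list_add(&it->list, head);
	}
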
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)

 	dpm_wait_for_subordinate(dev, async);

-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}

 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	pm_wakeup_event(dev, 0);

 	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}
@@ -36,6 +36,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define VDC_TX_RING_SIZE	512
 #define VDC_DEFAULT_BLK_SIZE	512

+#define MAX_XFER_BLKS		(128 * 1024)
+#define MAX_XFER_SIZE		(MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
+#define MAX_RING_COOKIES	((MAX_XFER_BLKS / PAGE_SIZE) + 2)
+
 #define WAITING_FOR_LINK_UP	0x01
 #define WAITING_FOR_TX_SPACE	0x02
 #define WAITING_FOR_GEN_CMD	0x04
@@ -450,7 +454,7 @@ static int __send_request(struct request *req)
 {
 	struct vdc_port *port = req->rq_disk->private_data;
 	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-	struct scatterlist sg[port->ring_cookies];
+	struct scatterlist sg[MAX_RING_COOKIES];
 	struct vdc_req_entry *rqe;
 	struct vio_disk_desc *desc;
 	unsigned int map_perm;
@@ -458,6 +462,9 @@ static int __send_request(struct request *req)
 	u64 len;
 	u8 op;

+	if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
+		return -EINVAL;
+
 	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

 	if (rq_data_dir(req) == READ) {
@@ -984,9 +991,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto err_out_free_port;

 	port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
-	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
-	port->ring_cookies = ((port->max_xfer_size *
-			       port->vdisk_block_size) / PAGE_SIZE) + 2;
+	port->max_xfer_size = MAX_XFER_SIZE;
+	port->ring_cookies = MAX_RING_COOKIES;

 	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
 	if (err)
@@ -1322,7 +1322,7 @@ static int qca_init_regulators(struct qca_power *qca,
 {
 	int i;

-	qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
-				      sizeof(struct regulator_bulk_data),
+	qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
+				      sizeof(struct regulator_bulk_data),
 				      GFP_KERNEL);
 	if (!qca->vreg_bulk)
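
Note: devm_kcalloc() is the managed counterpart of kcalloc(): it performs the count * size multiplication with overflow checking and returns NULL on wrap, unlike an open-coded multiply handed to devm_kzalloc(), which can silently produce a short allocation. Usage sketch (illustrative driver context):

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static int init_vregs_sketch(struct device *dev, int num_vregs)
	{
		struct regulator_bulk_data *bulk;

		bulk = devm_kcalloc(dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
		if (!bulk)
			return -ENOMEM;
		/* ... fill bulk[i].supply and request the regulators ... */
		return 0;
	}
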
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_TO_DEVICE;

 	/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_FROM_DEVICE;

 	/* Make sure IV is located in a DMAable area */
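
Note: in the caamalg layout the S/G table sits after the hardware descriptor, and hw_desc is a trailing array inside struct ablkcipher_edesc. Computing the S/G address as (void *)edesc + sizeof(struct) + desc_bytes breaks whenever tail padding makes sizeof() differ from the array's offset; addressing from the member itself always tracks the real layout. The distinction in miniature (sketch with a made-up struct):

	#include <stddef.h>
	#include <stdint.h>

	struct edesc_sketch {
		uint64_t dma_addr;	/* forces 8-byte struct alignment */
		int nents;
		uint32_t hw_desc[];	/* at offset 12; sizeof() rounds up to 16 */
	};

	static void *sg_table(struct edesc_sketch *e, size_t desc_bytes)
	{
		/* derive the offset from the member, not from sizeof(*e) */
		return (uint8_t *)e->hw_desc + desc_bytes;
	}
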
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 				!!lcb, ctx->tx_qidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				    ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 			       adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 out:
 	return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 };
 
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
 
 	safexcel_configure(priv);
 
-	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+	priv->ring = devm_kcalloc(dev, priv->config.rings,
+				  sizeof(*priv->ring),
 				  GFP_KERNEL);
 	if (!priv->ring) {
 		ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
 		if (ret)
 			goto err_reg_clk;
 
-		priv->ring[i].rdr_req = devm_kzalloc(dev,
-			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+		priv->ring[i].rdr_req = devm_kcalloc(dev,
+			EIP197_DEFAULT_RING_SIZE,
+			sizeof(priv->ring[i].rdr_req),
 			GFP_KERNEL);
 		if (!priv->ring[i].rdr_req) {
 			ret = -ENOMEM;
@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
@@ -14,6 +14,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
 static int fme_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
  *
  * Given a device, get an exclusive reference to a fpga bridge.
  *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
  */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
 				    struct fpga_image_info *info)
@@ -437,9 +437,10 @@ eprobe_mgr_put:
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = platform_get_drvdata(pdev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
 		irq_set_chained_handler_and_data(parent_irq, parent_handler,
 						 gpiochip);
 
-		gpiochip->irq.parents = &parent_irq;
+		gpiochip->irq.parent_irq = parent_irq;
+		gpiochip->irq.parents = &gpiochip->irq.parent_irq;
 		gpiochip->irq.num_parents = 1;
 	}
 
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 		return 0;
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-			&q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 		goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				&q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 out_unlock:
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 	}
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				       q->queue, &q->properties,
-				       q->process->mm);
+				       q->queue, &q->properties, mm);
 		if (retval)
 			goto out;
 		dqm->queue_count++;
 	}
 	qpd->evicted = 0;
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	return retval;
 }
@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
-
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 
+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
 * The client is freed when the &drm_device is unregistered. See drm_client_release().
 *
 * Returns:
 * Zero on success or negative error code on failure.
 */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
 		   const char *name, const struct drm_client_funcs *funcs)
 {
 	int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 	if (ret)
 		goto err_put_module;
 
-	mutex_lock(&dev->clientlist_mutex);
-	list_add(&client->list, &dev->clientlist);
-	mutex_unlock(&dev->clientlist_mutex);
-
 	drm_dev_get(dev);
 
 	return 0;
@@ -109,13 +106,33 @@ err_put_module:
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
 
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+	struct drm_device *dev = client->dev;
+
+	mutex_lock(&dev->clientlist_mutex);
+	list_add(&client->list, &dev->clientlist);
+	mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
 *
 * This function should only be called from the unregister callback. An exception
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
 	fb_helper = &fbdev_cma->fb_helper;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
 	if (ret)
 		goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	if (ret)
 		goto err_client_put;
 
+	drm_client_add(&fb_helper->client);
+
 	return fbdev_cma;
 
 err_client_put:
@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	if (!fb_helper)
 		return -ENOMEM;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 		kfree(fb_helper);
 		return ret;
 	}
 
+	drm_client_add(&fb_helper->client);
+
 	fb_helper->preferred_bpp = preferred_bpp;
 
 	drm_fbdev_client_hotplug(&fb_helper->client);
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	lessee_priv->is_master = 1;
 	lessee_priv->authenticated = 1;
 
-	/* Hook up the fd */
-	fd_install(fd, lessee_file);
-
 	/* Pass fd back to userspace */
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	cl->fd = fd;
 	cl->lessee_id = lessee->lessee_id;
 
+	/* Hook up the fd */
+	fd_install(fd, lessee_file);
+
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 					unsigned long start, unsigned long size)
 {
-	struct iommu_domain *domain;
-	int ret;
-
-	domain = iommu_domain_alloc(priv->dma_dev->bus);
-	if (!domain)
-		return -ENOMEM;
-
-	ret = iommu_get_dma_cookie(domain);
-	if (ret)
-		goto free_domain;
-
-	ret = iommu_dma_init_domain(domain, start, size, NULL);
-	if (ret)
-		goto put_cookie;
-
-	priv->mapping = domain;
+	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
 	return 0;
-
-put_cookie:
-	iommu_put_dma_cookie(domain);
-free_domain:
-	iommu_domain_free(domain);
-	return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-	struct iommu_domain *domain = priv->mapping;
-
-	iommu_put_dma_cookie(domain);
-	iommu_domain_free(domain);
 	priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	return iommu_attach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		return iommu_attach_device(domain, dev);
+	return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	iommu_detach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
 			break;
 		}
 		/* TDA9950 executes all retries for us */
-		tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+		if (tx_status != CEC_TX_STATUS_OK)
+			tx_status |= CEC_TX_STATUS_MAX_RETRIES;
 		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
 				  nack_cnt, 0, err_cnt);
 		break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
 	/* Wait up to .5s for it to signal non-busy */
 	do {
 		csr = tda9950_read(client, REG_CSR);
-		if (!(csr & CSR_BUSY) || --timeout)
+		if (!(csr & CSR_BUSY) || !--timeout)
 			break;
 		msleep(10);
 	} while (1);
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
 	return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+	unsigned long page;
+
+	if (dst->page_count >= dst->num_pages)
+		return ERR_PTR(-ENOSPC);
+
+	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
 			 void *src,
 			 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
 	do {
 		if (zstream->avail_out == 0) {
-			unsigned long page;
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
 
-			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-			if (!page)
-				return -ENOMEM;
-
-			dst->pages[dst->page_count++] = (void *)page;
-
-			zstream->next_out = (void *)page;
 			zstream->avail_out = PAGE_SIZE;
 		}
 
-		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
 	} while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	struct z_stream_s *zstream = &c->zstream;
+
+	do {
+		switch (zlib_deflate(zstream, Z_FINISH)) {
+		case Z_OK: /* more space requested */
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
+
+			zstream->avail_out = PAGE_SIZE;
+			break;
+
+		case Z_STREAM_END:
+			goto end;
+
+		default: /* any error */
+			return -EIO;
+		}
+	} while (1);
+
+end:
+	memset(zstream->next_out, 0, zstream->avail_out);
+	dst->unused = zstream->avail_out;
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
-	if (dst) {
-		zlib_deflate(zstream, Z_FINISH);
-		dst->unused = zstream->avail_out;
-	}
-
 	zlib_deflateEnd(zstream);
 	kfree(zstream->workspace);
 
 	if (c->tmp)
 		free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	dma_addr_t dma;
+	int ret;
 
 	if (!vma)
 		return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
+	dst->num_pages = num_pages;
 	dst->page_count = 0;
 	dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
 		return NULL;
 	}
 
+	ret = -EINVAL;
 	for_each_sgt_dma(dma, iter, vma->pages) {
 		void __iomem *s;
-		int ret;
 
 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void __force *)s, dst);
 		io_mapping_unmap_atomic(s);
 
 		if (ret)
-			goto unwind;
+			break;
 	}
-	goto out;
 
-unwind:
+	if (ret || compress_flush(&compress, dst)) {
 		while (dst->page_count--)
 			free_page((unsigned long)dst->pages[dst->page_count]);
 		kfree(dst);
 		dst = NULL;
+	}
 
-out:
 	compress_fini(&compress, dst);
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
@@ -135,6 +135,7 @@ struct i915_gpu_state {
 struct drm_i915_error_object {
 	u64 gtt_offset;
 	u64 gtt_size;
+	int num_pages;
 	int page_count;
 	int unused;
 	u32 *pages[0];
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 	spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-		      u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
 	void __iomem * const regs = dev_priv->regs;
+	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
+		return 0;
 
-	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-	if (likely(*iir))
-		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+	if (likely(iir))
+		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
+
+	return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-			  const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
-
-	if (unlikely(!iir)) {
-		DRM_ERROR("GU_MISC iir blank!\n");
-		return;
-	}
-
 	if (iir & GEN11_GU_MISC_GSE)
 		intel_opregion_asle_intr(dev_priv);
-	else
-		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(i915);
 	}
 
-	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
 	/* Acknowledge and enable interrupts. */
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
 	return IRQ_HANDLED;
 }
|
@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
|
||||||
GEN10_FEATURES, \
|
GEN10_FEATURES, \
|
||||||
GEN(11), \
|
GEN(11), \
|
||||||
.ddb_size = 2048, \
|
.ddb_size = 2048, \
|
||||||
.has_csr = 0, \
|
|
||||||
.has_logical_ring_elsq = 1
|
.has_logical_ring_elsq = 1
|
||||||
|
|
||||||
static const struct intel_device_info intel_icelake_11_info = {
|
static const struct intel_device_info intel_icelake_11_info = {
|
||||||
|
|
|
@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
|
||||||
}
|
}
|
||||||
|
|
||||||
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
|
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
|
||||||
mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
|
mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
|
||||||
sizeof(struct drm_plane),
|
sizeof(struct drm_plane),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
|
|
||||||
|
|
|
@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
mp->clk_config = devm_kzalloc(&pdev->dev,
|
mp->clk_config = devm_kcalloc(&pdev->dev,
|
||||||
sizeof(struct dss_clk) * num_clk,
|
num_clk, sizeof(struct dss_clk),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
if (!mp->clk_config)
|
if (!mp->clk_config)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
|
@ -900,9 +900,22 @@ static enum drm_connector_status
|
||||||
nv50_mstc_detect(struct drm_connector *connector, bool force)
|
nv50_mstc_detect(struct drm_connector *connector, bool force)
|
||||||
{
|
{
|
||||||
struct nv50_mstc *mstc = nv50_mstc(connector);
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
||||||
|
enum drm_connector_status conn_status;
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (!mstc->port)
|
if (!mstc->port)
|
||||||
return connector_status_disconnected;
|
return connector_status_disconnected;
|
||||||
return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
|
|
||||||
|
ret = pm_runtime_get_sync(connector->dev->dev);
|
||||||
|
if (ret < 0 && ret != -EACCES)
|
||||||
|
return connector_status_disconnected;
|
||||||
|
|
||||||
|
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
|
||||||
|
mstc->port);
|
||||||
|
|
||||||
|
pm_runtime_mark_last_busy(connector->dev->dev);
|
||||||
|
pm_runtime_put_autosuspend(connector->dev->dev);
|
||||||
|
return conn_status;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
|
|
|
@ -976,7 +976,6 @@
|
||||||
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
|
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
|
||||||
#define USB_DEVICE_ID_SIS_TS 0x1013
|
#define USB_DEVICE_ID_SIS_TS 0x1013
|
||||||
#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
|
#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
|
||||||
#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
|
|
||||||
|
|
||||||
#define USB_VENDOR_ID_SKYCABLE 0x1223
|
#define USB_VENDOR_ID_SKYCABLE 0x1223
|
||||||
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
|
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
|
||||||
|
|
|
@ -47,7 +47,7 @@
|
||||||
/* quirks to control the device */
|
/* quirks to control the device */
|
||||||
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
|
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
|
||||||
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
|
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
|
||||||
#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
|
#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
|
||||||
|
|
||||||
/* flags */
|
/* flags */
|
||||||
#define I2C_HID_STARTED 0
|
#define I2C_HID_STARTED 0
|
||||||
|
@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
|
||||||
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
|
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
|
||||||
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
|
||||||
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
|
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
|
||||||
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
|
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
|
||||||
{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
|
I2C_HID_QUIRK_NO_RUNTIME_PM },
|
||||||
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
|
|
||||||
{ 0, 0 }
|
{ 0, 0 }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
|
||||||
goto err_mem_free;
|
goto err_mem_free;
|
||||||
}
|
}
|
||||||
|
|
||||||
pm_runtime_put(&client->dev);
|
if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
|
||||||
|
pm_runtime_put(&client->dev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_mem_free:
|
err_mem_free:
|
||||||
|
@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
|
||||||
struct i2c_hid *ihid = i2c_get_clientdata(client);
|
struct i2c_hid *ihid = i2c_get_clientdata(client);
|
||||||
struct hid_device *hid;
|
struct hid_device *hid;
|
||||||
|
|
||||||
pm_runtime_get_sync(&client->dev);
|
if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
|
||||||
|
pm_runtime_get_sync(&client->dev);
|
||||||
pm_runtime_disable(&client->dev);
|
pm_runtime_disable(&client->dev);
|
||||||
pm_runtime_set_suspended(&client->dev);
|
pm_runtime_set_suspended(&client->dev);
|
||||||
pm_runtime_put_noidle(&client->dev);
|
pm_runtime_put_noidle(&client->dev);
|
||||||
|
@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
|
||||||
|
|
||||||
/* Instead of resetting device, simply powers the device on. This
|
/* Instead of resetting device, simply powers the device on. This
|
||||||
* solves "incomplete reports" on Raydium devices 2386:3118 and
|
* solves "incomplete reports" on Raydium devices 2386:3118 and
|
||||||
* 2386:4B33
|
* 2386:4B33 and fixes various SIS touchscreens no longer sending
|
||||||
|
* data after a suspend/resume.
|
||||||
*/
|
*/
|
||||||
ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
|
ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/* Some devices need to re-send report descr cmd
|
|
||||||
* after resume, after this it will be back normal.
|
|
||||||
* otherwise it issues too many incomplete reports.
|
|
||||||
*/
|
|
||||||
if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
|
|
||||||
ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (hid->driver && hid->driver->reset_resume) {
|
if (hid->driver && hid->driver->reset_resume) {
|
||||||
ret = hid->driver->reset_resume(hid);
|
ret = hid->driver->reset_resume(hid);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@ -29,6 +29,7 @@
|
||||||
#define CNL_Ax_DEVICE_ID 0x9DFC
|
#define CNL_Ax_DEVICE_ID 0x9DFC
|
||||||
#define GLK_Ax_DEVICE_ID 0x31A2
|
#define GLK_Ax_DEVICE_ID 0x31A2
|
||||||
#define CNL_H_DEVICE_ID 0xA37C
|
#define CNL_H_DEVICE_ID 0xA37C
|
||||||
|
#define ICL_MOBILE_DEVICE_ID 0x34FC
|
||||||
#define SPT_H_DEVICE_ID 0xA135
|
#define SPT_H_DEVICE_ID 0xA135
|
||||||
|
|
||||||
#define REVISION_ID_CHT_A0 0x6
|
#define REVISION_ID_CHT_A0 0x6
|
||||||
|
|
|
@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
|
||||||
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
|
||||||
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
|
||||||
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
|
||||||
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
|
||||||
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
|
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
|
||||||
{0, }
|
{0, }
|
||||||
};
|
};
|
||||||
|
|
|
@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
|
||||||
__u32 version)
|
__u32 version)
|
||||||
{
|
{
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
unsigned int cur_cpu;
|
||||||
struct vmbus_channel_initiate_contact *msg;
|
struct vmbus_channel_initiate_contact *msg;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
|
@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
|
||||||
* the CPU attempting to connect may not be CPU 0.
|
* the CPU attempting to connect may not be CPU 0.
|
||||||
*/
|
*/
|
||||||
if (version >= VERSION_WIN8_1) {
|
if (version >= VERSION_WIN8_1) {
|
||||||
msg->target_vcpu =
|
cur_cpu = get_cpu();
|
||||||
hv_cpu_number_to_vp_number(smp_processor_id());
|
msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
|
||||||
vmbus_connection.connect_cpu = smp_processor_id();
|
vmbus_connection.connect_cpu = cur_cpu;
|
||||||
|
put_cpu();
|
||||||
} else {
|
} else {
|
||||||
msg->target_vcpu = 0;
|
msg->target_vcpu = 0;
|
||||||
vmbus_connection.connect_cpu = 0;
|
vmbus_connection.connect_cpu = 0;
|
||||||
|
|
|
@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
|
||||||
if (fan_cnt < 1)
|
if (fan_cnt < 1)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
|
fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
|
||||||
if (!fan_ch)
|
if (!fan_ch)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
|