Merge drm/drm-next into drm-misc-next

Backmerging to get fixes from v6.14-rc4.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Thomas Zimmermann 2025-02-25 11:43:10 +01:00
commit 130377304e
589 changed files with 6615 additions and 3332 deletions


@ -226,6 +226,7 @@ Fangrui Song <i@maskray.me> <maskray@google.com>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Feng Tang <feng.79.tang@gmail.com> <feng.tang@intel.com>
Fenglin Wu <quic_fenglinw@quicinc.com> <fenglinw@codeaurora.org>
Filipe Lautert <filipe@icewall.org>
Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
@ -317,6 +318,8 @@ Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
Jean Tourrilhes <jt@hpl.hp.com>
Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <jjohnson@codeaurora.org>
Jeff Johnson <jeff.johnson@oss.qualcomm.com> <quic_jjohnson@quicinc.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
@ -377,6 +380,7 @@ Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Iskren Chernev <me@iskren.info> <iskren.chernev@gmail.com>
Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
Kalle Valo <kvalo@kernel.org> <quic_kvalo@quicinc.com>
Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Karthikeyan Periyasamy <quic_periyasa@quicinc.com> <periyasa@codeaurora.org>
Kathiravan T <quic_kathirav@quicinc.com> <kathirav@codeaurora.org>
@ -531,6 +535,7 @@ Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
Nick Desaulniers <nick.desaulniers+lkml@gmail.com> <ndesaulniers@google.com>
Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>


@ -37,7 +37,7 @@ intended to be exhaustive.
shadow stacks rather than GCS.
* Support for GCS is reported to userspace via HWCAP_GCS in the aux vector
AT_HWCAP2 entry.
AT_HWCAP entry.
* GCS is enabled per thread. While there is support for disabling GCS
at runtime this should be done with great care.


@ -25,7 +25,7 @@ to cache translations for virtual addresses. The IOMMU driver uses the
mmu_notifier() support to keep the device TLB cache and the CPU cache in
sync. When an ATS lookup fails for a virtual address, the device should
use the PRI in order to request the virtual address to be paged into the
CPU page tables. The device must use ATS again in order the fetch the
CPU page tables. The device must use ATS again in order to fetch the
translation before use.
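As an illustrative aside (not part of this diff), the recoverable-fault flow described above is what a driver opts into when it binds a process address space through the kernel's IOMMU SVA API; a minimal sketch, assuming a PASID-capable device (my_sva_bind() is a hypothetical helper, not kernel API):

#include <linux/iommu.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Sketch only: bind current->mm for Shared Virtual Addressing. After
 * this, the device may DMA with CPU virtual addresses; ATS misses are
 * resolved through PRI page requests as described above. */
static struct iommu_sva *my_sva_bind(struct device *dev)
{
        struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);

        if (IS_ERR(handle))
                return handle;

        /* The PASID to program into the device's work descriptors. */
        pr_info("SVA bound, pasid %u\n", iommu_sva_get_pasid(handle));
        return handle;
}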
Shared Hardware Workqueues
@ -216,7 +216,7 @@ submitting work and processing completions.
Single Root I/O Virtualization (SR-IOV) focuses on providing independent
hardware interfaces for virtualizing hardware. Hence, it's required to be
almost fully functional interface to software supporting the traditional
an almost fully functional interface to software supporting the traditional
BARs, space for interrupts via MSI-X, its own register layout.
Virtual Functions (VFs) are assisted by the Physical Function (PF)
driver.
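As a hedged illustration of that PF/VF relationship (not from this patch): VF creation is usually funneled through the PF driver's sriov_configure hook, reached by writing a count to the device's sriov_numvfs sysfs attribute; my_sriov_configure is a hypothetical name:

#include <linux/pci.h>

/* Sketch: wired into a PF driver's struct pci_driver as
 * .sriov_configure. Returning the VF count signals success. */
static int my_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        if (num_vfs == 0) {
                pci_disable_sriov(pdev);        /* tear all VFs down */
                return 0;
        }

        return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
}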


@ -53,11 +53,17 @@ properties:
reg:
maxItems: 1
power-controller:
type: object
reboot-mode:
type: object
required:
- compatible
- reg
additionalProperties: true
additionalProperties: false
examples:
- |


@ -8,6 +8,7 @@ title: Qualcomm Graphics Clock & Reset Controller
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
- Imran Shaik <quic_imrashai@quicinc.com>
description: |
Qualcomm graphics clock control module provides the clocks, resets and power
@ -23,10 +24,12 @@ description: |
include/dt-bindings/clock/qcom,gpucc-sm8150.h
include/dt-bindings/clock/qcom,gpucc-sm8250.h
include/dt-bindings/clock/qcom,gpucc-sm8350.h
include/dt-bindings/clock/qcom,qcs8300-gpucc.h
properties:
compatible:
enum:
- qcom,qcs8300-gpucc
- qcom,sdm845-gpucc
- qcom,sa8775p-gpucc
- qcom,sc7180-gpucc


@ -8,16 +8,20 @@ title: Qualcomm Camera Clock & Reset Controller on SA8775P
maintainers:
- Taniya Das <quic_tdas@quicinc.com>
- Imran Shaik <quic_imrashai@quicinc.com>
description: |
Qualcomm camera clock control module provides the clocks, resets and power
domains on SA8775p.
See also: include/dt-bindings/clock/qcom,sa8775p-camcc.h
See also:
include/dt-bindings/clock/qcom,qcs8300-camcc.h
include/dt-bindings/clock/qcom,sa8775p-camcc.h
properties:
compatible:
enum:
- qcom,qcs8300-camcc
- qcom,sa8775p-camcc
clocks:


@ -18,6 +18,7 @@ description: |
properties:
compatible:
enum:
- qcom,qcs8300-videocc
- qcom,sa8775p-videocc
clocks:


@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/powertip,hx8238a.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Powertip Electronic Technology Co. 320 x 240 LCD panel
maintainers:
- Lukasz Majewski <lukma@denx.de>
allOf:
- $ref: panel-dpi.yaml#
properties:
compatible:
items:
- const: powertip,hx8238a
- {} # panel-dpi, but not listed here to avoid false select
height-mm: true
panel-timing: true
port: true
power-supply: true
width-mm: true
additionalProperties: false
...


@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/powertip,st7272.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Powertip Electronic Technology Co. 320 x 240 LCD panel
maintainers:
- Lukasz Majewski <lukma@denx.de>
allOf:
- $ref: panel-dpi.yaml#
properties:
compatible:
items:
- const: powertip,st7272
- {} # panel-dpi, but not listed here to avoid false select
height-mm: true
panel-timing: true
port: true
power-supply: true
width-mm: true
additionalProperties: false
...


@ -23,7 +23,7 @@ properties:
compatible:
enum:
- ti,am625-dss
- ti,am62a7,dss
- ti,am62a7-dss
- ti,am65x-dss
reg:


@ -33,6 +33,10 @@ properties:
clocks:
maxItems: 1
clock-names:
items:
- const: nf_clk
dmas:
maxItems: 1
@ -51,6 +55,7 @@ required:
- reg-names
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
@ -66,7 +71,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&nf_clk>;
clocks = <&clk>;
clock-names = "nf_clk";
cdns,board-delay-ps = <4830>;
nand@0 {


@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath10k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description:


@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath11k wireless devices (PCIe)
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description: |


@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies ath11k wireless devices
maintainers:
- Kalle Valo <kvalo@kernel.org>
- Jeff Johnson <jjohnson@kernel.org>
description: |


@ -9,7 +9,6 @@ title: Qualcomm Technologies ath12k wireless devices (PCIe) with WSI interface
maintainers:
- Jeff Johnson <jjohnson@kernel.org>
- Kalle Valo <kvalo@kernel.org>
description: |
Qualcomm Technologies IEEE 802.11be PCIe devices with WSI interface.


@ -9,7 +9,6 @@ title: Qualcomm Technologies ath12k wireless devices (PCIe)
maintainers:
- Jeff Johnson <quic_jjohnson@quicinc.com>
- Kalle Valo <kvalo@kernel.org>
description:
Qualcomm Technologies IEEE 802.11be PCIe devices.


@ -36,6 +36,7 @@ properties:
- qcom,qcs404-qfprom
- qcom,qcs615-qfprom
- qcom,qcs8300-qfprom
- qcom,sar2130p-qfprom
- qcom,sc7180-qfprom
- qcom,sc7280-qfprom
- qcom,sc8280xp-qfprom


@ -22,7 +22,7 @@ description:
Each sub-node is identified using the node's name, with valid values listed
for each of the pmics below.
For mp5496, s1, s2
For mp5496, s1, s2, l2, l5
For pm2250, s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11,
l12, l13, l14, l15, l16, l17, l18, l19, l20, l21, l22


@ -41,6 +41,12 @@ Device Drivers Base
.. kernel-doc:: drivers/base/class.c
:export:
.. kernel-doc:: include/linux/device/faux.h
:internal:
.. kernel-doc:: drivers/base/faux.c
:export:
.. kernel-doc:: drivers/base/node.c
:internal:


@ -1524,7 +1524,8 @@ attribute-sets:
nested-attributes: bitset
-
name: hwtstamp-flags
type: u32
type: nest
nested-attributes: bitset
operations:
enum-model: directional


@ -369,8 +369,8 @@ to their default.
addr.can_family = AF_CAN;
addr.can_ifindex = if_nametoindex("can0");
addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
ret = bind(s, (struct sockaddr *)&addr, sizeof(addr));
if (ret < 0)
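For context, the corrected addressing above in one self-contained sketch (the interface name and the 29-bit IDs follow the documentation example; isotp_open() is an illustrative helper, and error handling is trimmed):

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

int isotp_open(const char *ifname)
{
        struct sockaddr_can addr;
        int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

        if (s < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.can_family = AF_CAN;
        addr.can_ifindex = if_nametoindex(ifname);
        /* The ISO-TP IDs live in the can_addr union, hence the fix above. */
        addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
        addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;

        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(s);
                return -1;
        }

        return s;
}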


@ -112,7 +112,7 @@ Functions
Callbacks
=========
There are six callbacks:
There are seven callbacks:
::
@ -182,6 +182,13 @@ There are six callbacks:
the length of the message. skb->len - offset may be greater
than full_len since strparser does not trim the skb.
::
int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
The read_sock callback is used by strparser instead of
sock->ops->read_sock, if provided.
::
int (*read_sock_done)(struct strparser *strp, int err);
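To make the new hook concrete, a minimal sketch of registering a strparser with the read_sock override (the my_* callbacks are hypothetical, user-supplied functions):

#include <net/strparser.h>

static int my_parse_msg(struct strparser *strp, struct sk_buff *skb);
static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb);
static int my_read_sock(struct strparser *strp, read_descriptor_t *desc,
                        sk_read_actor_t recv_actor);

/* read_sock is optional: when set, strparser calls it instead of
 * sock->ops->read_sock to pull data out of the socket. */
static const struct strp_callbacks my_strp_cb = {
        .parse_msg = my_parse_msg,      /* returns the next message length */
        .rcv_msg   = my_rcv_msg,        /* consumes a complete message */
        .read_sock = my_read_sock,
};

static int my_strp_attach(struct strparser *strp, struct sock *sk)
{
        return strp_init(strp, sk, &my_strp_cb);
}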


@ -308,7 +308,7 @@ an involved disclosed party. The current ambassadors list:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
If you want your organization to be added to the ambassadors list, please


@ -287,7 +287,7 @@ revelada involucrada. La lista de embajadores actuales:
Google Kees Cook <keescook@chromium.org>
LLVM Nick Desaulniers <ndesaulniers@google.com>
LLVM Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
============= ========================================================
Si quiere que su organización se añada a la lista de embajadores, por


@ -2210,6 +2210,7 @@ F: sound/soc/codecs/ssm3515.c
ARM/APPLE MACHINE SUPPORT
M: Sven Peter <sven@svenpeter.dev>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -2284,7 +2285,7 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
R: Andrew Jeffery <andrew@codeconstruct.com.au>
M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
@ -3654,7 +3655,6 @@ F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
@ -3859,13 +3859,6 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
F: drivers/pwm/pwm-axi-pwmgen.c
AXXIA I2C CONTROLLER
M: Krzysztof Adamski <krzysztof.adamski@nokia.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-axxia.txt
F: drivers/i2c/busses/i2c-axxia.c
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
@ -5663,7 +5656,7 @@ F: .clang-format
CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nick Desaulniers <nick.desaulniers+lkml@gmail.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
@ -7116,8 +7109,10 @@ F: rust/kernel/device.rs
F: rust/kernel/device_id.rs
F: rust/kernel/devres.rs
F: rust/kernel/driver.rs
F: rust/kernel/faux.rs
F: rust/kernel/platform.rs
F: samples/rust/rust_driver_platform.rs
F: samples/rust/rust_driver_faux.rs
DRIVERS FOR OMAP ADAPTIVE VOLTAGE SCALING (AVS)
M: Nishanth Menon <nm@ti.com>
@ -7428,7 +7423,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org
@ -9833,8 +9827,7 @@ F: drivers/input/touchscreen/goodix*
GOOGLE ETHERNET DRIVERS
M: Jeroen de Borst <jeroendb@google.com>
M: Praveen Kaligineedi <pkaligineedi@google.com>
R: Shailend Chand <shailend@google.com>
M: Harshitha Ramamurthy <hramamurthy@google.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
@ -10820,7 +10813,7 @@ S: Odd Fixes
F: drivers/tty/hvc/
I2C ACPI SUPPORT
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Mika Westerberg <westeri@kernel.org>
L: linux-i2c@vger.kernel.org
L: linux-acpi@vger.kernel.org
S: Maintained
@ -16436,7 +16429,7 @@ X: drivers/net/can/
X: drivers/net/wireless/
NETWORKING DRIVERS (WIRELESS)
M: Kalle Valo <kvalo@kernel.org>
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/
@ -16476,6 +16469,12 @@ F: net/ethtool/cabletest.c
F: tools/testing/selftests/drivers/net/*/ethtool*
K: cable_test
NETWORKING [ETHTOOL MAC MERGE]
M: Vladimir Oltean <vladimir.oltean@nxp.com>
F: net/ethtool/mm.c
F: tools/testing/selftests/drivers/net/hw/ethtool_mm.sh
K: ethtool_mm
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
@ -16507,6 +16506,7 @@ F: include/linux/netdev*
F: include/linux/netlink.h
F: include/linux/netpoll.h
F: include/linux/rtnetlink.h
F: include/linux/sctp.h
F: include/linux/seq_file_net.h
F: include/linux/skbuff*
F: include/net/
@ -16523,6 +16523,7 @@ F: include/uapi/linux/netdev*
F: include/uapi/linux/netlink.h
F: include/uapi/linux/netlink_diag.h
F: include/uapi/linux/rtnetlink.h
F: include/uapi/linux/sctp.h
F: lib/net_utils.c
F: lib/random32.c
F: net/
@ -19352,7 +19353,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath12k@lists.infradead.org
S: Supported
@ -19362,7 +19362,6 @@ F: drivers/net/wireless/ath/ath12k/
N: ath12k
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
@ -19372,7 +19371,6 @@ F: drivers/net/wireless/ath/ath10k/
N: ath10k
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <jjohnson@kernel.org>
L: ath11k@lists.infradead.org
S: Supported
@ -19507,6 +19505,15 @@ L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/qcom/hidma*
QUALCOMM I2C QCOM GENI DRIVER
M: Mukesh Kumar Savaliya <quic_msavaliy@quicinc.com>
M: Viken Dadhaniya <quic_vdadhani@quicinc.com>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
F: drivers/i2c/busses/i2c-qcom-geni.c
QUALCOMM I2C CCI DRIVER
M: Loic Poulain <loic.poulain@linaro.org>
M: Robert Foss <rfoss@kernel.org>
@ -19869,7 +19876,7 @@ F: net/rds/
F: tools/testing/selftests/net/rds/
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
M: Tony Luck <tony.luck@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
@ -24059,7 +24066,6 @@ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
R: Karol Herbst <karolherbst@gmail.com>
R: Pekka Paalanen <ppaalanen@gmail.com>
L: linux-kernel@vger.kernel.org
L: nouveau@lists.freedesktop.org


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@ -1120,8 +1120,8 @@ LDFLAGS_vmlinux += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
endif
# Align the bit size of userspace programs with the kernel
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@ -1421,18 +1421,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
endif
# Clear a bunch of variables before executing the submake
ifeq ($(quiet),silent_)
tools_silent=s
endif
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
$(Q)$(MAKE) LDFLAGS= O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
# ---------------------------------------------------------------------------
# Kernel selftest


@ -135,7 +135,7 @@ struct crb_struct {
/* virtual->physical map */
unsigned long map_entries;
unsigned long map_pages;
struct vf_map_struct map[1];
struct vf_map_struct map[];
};
struct memclust_struct {
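An illustrative note on the map[1] to map[] change above: a C99 flexible array member makes the trailing array's size explicit at allocation time. A generic sketch using the kernel's struct_size() helper — the struct and crb_map_alloc() here are stand-ins, since the real crb_struct is handed to the kernel by console firmware rather than allocated:

#include <linux/overflow.h>
#include <linux/slab.h>

struct map_entry {
        unsigned long va, pa, count;
};

struct crb_map {
        unsigned long map_entries;
        struct map_entry map[];         /* flexible array, was map[1] */
};

static struct crb_map *crb_map_alloc(unsigned long n)
{
        /* struct_size() = sizeof(*m) + n * sizeof(m->map[0]), overflow-checked */
        struct crb_map *m = kzalloc(struct_size(m, map, n), GFP_KERNEL);

        if (m)
                m->map_entries = n;
        return m;
}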


@ -42,6 +42,8 @@ struct pt_regs {
unsigned long trap_a0;
unsigned long trap_a1;
unsigned long trap_a2;
/* This makes the stack 16-byte aligned as GCC expects */
unsigned long __pad0;
/* These are saved by PAL-code: */
unsigned long ps;
unsigned long pc;


@ -19,9 +19,13 @@ static void __used foo(void)
DEFINE(TI_STATUS, offsetof(struct thread_info, status));
BLANK();
DEFINE(SP_OFF, offsetof(struct pt_regs, ps));
DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
BLANK();
DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
BLANK();
DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}


@ -15,10 +15,6 @@
.set noat
.cfi_sections .debug_frame
/* Stack offsets. */
#define SP_OFF 184
#define SWITCH_STACK_SIZE 64
.macro CFI_START_OSF_FRAME func
.align 4
.globl \func
@ -198,8 +194,8 @@ CFI_END_OSF_FRAME entArith
CFI_START_OSF_FRAME entMM
SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them. */
subq $sp, 56, $sp
.cfi_adjust_cfa_offset 56
subq $sp, 64, $sp
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -214,7 +210,7 @@ CFI_START_OSF_FRAME entMM
.cfi_rel_offset $13, 32
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
addq $sp, 56, $19
addq $sp, 64, $19
/* handle the fault */
lda $8, 0x3fff
bic $sp, $8, $8
@ -227,7 +223,7 @@ CFI_START_OSF_FRAME entMM
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
addq $sp, 56, $sp
addq $sp, 64, $sp
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -235,7 +231,7 @@ CFI_START_OSF_FRAME entMM
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
/* finish up the syscall as normal. */
br ret_from_sys_call
CFI_END_OSF_FRAME entMM
@ -382,8 +378,8 @@ entUnaUser:
.cfi_restore $0
.cfi_adjust_cfa_offset -256
SAVE_ALL /* setup normal kernel stack */
lda $sp, -56($sp)
.cfi_adjust_cfa_offset 56
lda $sp, -64($sp)
.cfi_adjust_cfa_offset 64
stq $9, 0($sp)
stq $10, 8($sp)
stq $11, 16($sp)
@ -399,7 +395,7 @@ entUnaUser:
.cfi_rel_offset $14, 40
.cfi_rel_offset $15, 48
lda $8, 0x3fff
addq $sp, 56, $19
addq $sp, 64, $19
bic $sp, $8, $8
jsr $26, do_entUnaUser
ldq $9, 0($sp)
@ -409,7 +405,7 @@ entUnaUser:
ldq $13, 32($sp)
ldq $14, 40($sp)
ldq $15, 48($sp)
lda $sp, 56($sp)
lda $sp, 64($sp)
.cfi_restore $9
.cfi_restore $10
.cfi_restore $11
@ -417,7 +413,7 @@ entUnaUser:
.cfi_restore $13
.cfi_restore $14
.cfi_restore $15
.cfi_adjust_cfa_offset -56
.cfi_adjust_cfa_offset -64
br ret_from_sys_call
CFI_END_OSF_FRAME entUna


@ -13,6 +13,7 @@
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>
#include <linux/string_choices.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
@ -212,7 +213,7 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
/* If both conditions above are met, we are fine. */
DBGA("pci_dac_dma_supported %s from %ps\n",
ok ? "yes" : "no", __builtin_return_address(0));
str_yes_no(ok), __builtin_return_address(0));
return ok;
}


@ -649,7 +649,7 @@ s_reg_to_mem (unsigned long s_reg)
static int unauser_reg_offsets[32] = {
R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
/* r9 ... r15 are stored in front of regs. */
-56, -48, -40, -32, -24, -16, -8,
-64, -56, -48, -40, -32, -24, -16, /* padding at -8 */
R(r16), R(r17), R(r18),
R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
R(r27), R(r28), R(gp),


@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
(r) <= 18 ? (r)+10 : (r)-10])
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 : \
(r) <= 18 ? (r)+11 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,


@ -225,7 +225,6 @@ config ARM64
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_FREGS
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_RETVAL
select HAVE_GCC_PLUGINS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && \
HW_PERF_EVENTS && HAVE_PERF_EVENTS_NMI


@ -48,7 +48,11 @@ KBUILD_CFLAGS += $(CC_FLAGS_NO_FPU) \
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(compat_vdso)
ifeq ($(call test-ge, $(CONFIG_RUSTC_VERSION), 108500),y)
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none-softfloat
else
KBUILD_RUSTFLAGS += --target=aarch64-unknown-none -Ctarget-feature="-neon"
endif
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)


@ -226,7 +226,6 @@
};
&uart5 {
pinctrl-0 = <&uart5_xfer>;
rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
status = "okay";
};


@ -396,6 +396,12 @@
status = "okay";
};
&uart5 {
/delete-property/ dmas;
/delete-property/ dma-names;
pinctrl-0 = <&uart5_xfer>;
};
/* Mule UCAN */
&usb_host0_ehci {
status = "okay";


@ -17,8 +17,7 @@
&gmac2io {
phy-handle = <&yt8531c>;
tx_delay = <0x19>;
rx_delay = <0x05>;
phy-mode = "rgmii-id";
status = "okay";
mdio {


@ -15,6 +15,7 @@
&gmac2io {
phy-handle = <&rtl8211e>;
phy-mode = "rgmii";
tx_delay = <0x24>;
rx_delay = <0x18>;
status = "okay";


@ -109,7 +109,6 @@
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
clock_in_out = "input";
phy-mode = "rgmii";
phy-supply = <&vcc_io>;
pinctrl-0 = <&rgmiim1_pins>;
pinctrl-names = "default";


@ -22,11 +22,11 @@
};
/* EC turns on w/ pp900_usb_en */
pp900_usb: pp900-ap {
pp900_usb: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pcie_en */
pp900_pcie: pp900-ap {
pp900_pcie: regulator-pp900-ap {
};
pp3000: regulator-pp3000 {
@ -126,7 +126,7 @@
};
/* Always on; plain and simple */
pp3000_ap: pp3000_emmc: pp3000 {
pp3000_ap: pp3000_emmc: regulator-pp3000 {
};
pp1500_ap_io: regulator-pp1500-ap-io {
@ -160,7 +160,7 @@
};
/* EC turns on w/ pp3300_usb_en_l */
pp3300_usb: pp3300 {
pp3300_usb: regulator-pp3300 {
};
/* gpio is shared with pp1800_pcie and pinctrl is set there */


@ -92,7 +92,7 @@
};
/* EC turns on pp1800_s3_en */
pp1800_s3: pp1800 {
pp1800_s3: regulator-pp1800 {
};
/* pp3300 children, sorted by name */
@ -109,11 +109,11 @@
};
/* EC turns on pp3300_s0_en */
pp3300_s0: pp3300 {
pp3300_s0: regulator-pp3300 {
};
/* EC turns on pp3300_s3_en */
pp3300_s3: pp3300 {
pp3300_s3: regulator-pp3300 {
};
/*


@ -189,39 +189,39 @@
};
/* EC turns on w/ pp900_ddrpll_en */
pp900_ddrpll: pp900-ap {
pp900_ddrpll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pll_en */
pp900_pll: pp900-ap {
pp900_pll: regulator-pp900-ap {
};
/* EC turns on w/ pp900_pmu_en */
pp900_pmu: pp900-ap {
pp900_pmu: regulator-pp900-ap {
};
/* EC turns on w/ pp1800_s0_en_l */
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 {
};
/* EC turns on w/ pp1800_avdd_en_l */
pp1800_avdd: pp1800 {
pp1800_avdd: regulator-pp1800 {
};
/* EC turns on w/ pp1800_lid_en_l */
pp1800_lid: pp1800_mic: pp1800 {
pp1800_lid: pp1800_mic: regulator-pp1800 {
};
/* EC turns on w/ lpddr_pwr_en */
pp1800_lpddr: pp1800 {
pp1800_lpddr: regulator-pp1800 {
};
/* EC turns on w/ pp1800_pmu_en_l */
pp1800_pmu: pp1800 {
pp1800_pmu: regulator-pp1800 {
};
/* EC turns on w/ pp1800_usb_en_l */
pp1800_usb: pp1800 {
pp1800_usb: regulator-pp1800 {
};
pp3000_sd_slot: regulator-pp3000-sd-slot {
@ -259,11 +259,11 @@
};
/* EC turns on w/ pp3300_trackpad_en_l */
pp3300_trackpad: pp3300-trackpad {
pp3300_trackpad: regulator-pp3300-trackpad {
};
/* EC turns on w/ usb_a_en */
pp5000_usb_a_vbus: pp5000 {
pp5000_usb_a_vbus: regulator-pp5000 {
};
ap_rtc_clk: ap-rtc-clk {


@ -549,10 +549,10 @@
mmu600_pcie: iommu@fc900000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfc900000 0x0 0x200000>;
interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 369 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 371 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 374 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 367 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
};
@ -560,10 +560,10 @@
mmu600_php: iommu@fcb00000 {
compatible = "arm,smmu-v3";
reg = <0x0 0xfcb00000 0x0 0x200000>;
interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 386 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH 0>;
interrupts = <GIC_SPI 381 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 383 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 386 IRQ_TYPE_EDGE_RISING 0>,
<GIC_SPI 379 IRQ_TYPE_EDGE_RISING 0>;
interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
#iommu-cells = <1>;
status = "disabled";
@ -2668,9 +2668,9 @@
rockchip,hw-tshut-temp = <120000>;
rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */
rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
pinctrl-0 = <&tsadc_gpio_func>;
pinctrl-1 = <&tsadc_shut>;
pinctrl-names = "gpio", "otpout";
pinctrl-0 = <&tsadc_shut_org>;
pinctrl-1 = <&tsadc_gpio_func>;
pinctrl-names = "default", "sleep";
#thermal-sensor-cells = <1>;
status = "disabled";
};


@ -113,7 +113,7 @@
compatible = "regulator-fixed";
regulator-name = "vcc3v3_lcd";
enable-active-high;
gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>;
gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&lcdpwr_en>;
vin-supply = <&vcc3v3_sys>;
@ -241,7 +241,7 @@
&pinctrl {
lcd {
lcdpwr_en: lcdpwr-en {
rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>;
};
bl_en: bl-en {


@ -213,7 +213,6 @@
interrupt-names = "sys", "pmc", "msg", "legacy", "err",
"dma0", "dma1", "dma2", "dma3";
max-link-speed = <3>;
iommus = <&mmu600_pcie 0x0000>;
num-lanes = <4>;
phys = <&pcie30phy>;
phy-names = "pcie-phy";


@ -23,3 +23,7 @@
vpcie3v3-supply = <&vcc3v3_pcie30>;
status = "okay";
};
&mmu600_pcie {
status = "disabled";
};


@ -1551,6 +1551,8 @@ CONFIG_PWM_VISCONTI=m
CONFIG_SL28CPLD_INTC=y
CONFIG_QCOM_PDC=y
CONFIG_QCOM_MPM=y
CONFIG_TI_SCI_INTR_IRQCHIP=y
CONFIG_TI_SCI_INTA_IRQCHIP=y
CONFIG_RESET_GPIO=m
CONFIG_RESET_IMX7=y
CONFIG_RESET_QCOM_AOSS=y


@ -605,48 +605,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
__cpacr_to_cptr_set(clr, set));\
} while (0)
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
write_sysreg(val, cpacr_el1);
else
write_sysreg(val, cptr_el2);
}
/* Resets the value of cptr_el2 when returning to the host. */
static __always_inline void __kvm_reset_cptr_el2(struct kvm *kvm)
{
u64 val;
if (has_vhe()) {
val = (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
val = CPACR_EL1_FPEN;
if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
if (kvm_has_sve(kvm) && guest_owns_fp_regs())
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
}
kvm_write_cptr_el2(val);
}
#ifdef __KVM_NVHE_HYPERVISOR__
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2(kern_hyp_va((v)->kvm))
#else
#define kvm_reset_cptr_el2(v) __kvm_reset_cptr_el2((v)->kvm)
#endif
/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.


@ -100,7 +100,7 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
void *(*to_va)(phys_addr_t phys))
{
phys_addr_t *p = to_va(mc->head);
phys_addr_t *p = to_va(mc->head & PAGE_MASK);
if (!mc->nr_pages)
return NULL;
@ -615,8 +615,6 @@ struct cpu_sve_state {
struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_SPE 0
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
#define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED 2
#define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED 3
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
unsigned long flags;
@ -624,23 +622,13 @@ struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
/*
* All pointers in this union are hyp VA.
* Hyp VA.
* sve_state is only used in pKVM and if system_supports_sve().
*/
union {
struct user_fpsimd_state *fpsimd_state;
struct cpu_sve_state *sve_state;
};
struct cpu_sve_state *sve_state;
union {
/* HYP VA pointer to the host storage for FPMR */
u64 *fpmr_ptr;
/*
* Used by pKVM only, as it needs to provide storage
* for the host
*/
u64 fpmr;
};
/* Used by pKVM only. */
u64 fpmr;
/* Ownership of the FP regs */
enum {


@ -101,16 +101,18 @@ int populate_cache_leaves(unsigned int cpu)
unsigned int level, idx;
enum cache_type type;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct cacheinfo *infos = this_cpu_ci->info_list;
for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
idx < this_cpu_ci->num_leaves; level++) {
type = get_cache_type(level);
if (type == CACHE_TYPE_SEPARATE) {
ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
if (idx + 1 >= this_cpu_ci->num_leaves)
break;
ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level);
ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level);
} else {
ci_leaf_init(this_leaf++, type, level);
ci_leaf_init(&infos[idx++], type, level);
}
}
return 0;


@ -3091,6 +3091,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
HWCAP_CAP(ID_AA64ISAR0_EL1, TS, FLAGM2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
HWCAP_CAP(ID_AA64ISAR0_EL1, RNDR, IMP, CAP_HWCAP, KERNEL_HWCAP_RNG),
HWCAP_CAP(ID_AA64ISAR3_EL1, FPRCVT, IMP, CAP_HWCAP, KERNEL_HWCAP_FPRCVT),
HWCAP_CAP(ID_AA64PFR0_EL1, FP, IMP, CAP_HWCAP, KERNEL_HWCAP_FP),
HWCAP_CAP(ID_AA64PFR0_EL1, FP, FP16, CAP_HWCAP, KERNEL_HWCAP_FPHP),
HWCAP_CAP(ID_AA64PFR0_EL1, AdvSIMD, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
@ -3180,8 +3181,6 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8MM8, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8MM8),
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8MM4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8MM4),
HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
@ -3192,6 +3191,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP4),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8DP2),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM8, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM8),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8MM4, IMP, CAP_HWCAP, KERNEL_HWCAP_F8MM4),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E4M3, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E4M3),
HWCAP_CAP(ID_AA64FPFR0_EL1, F8E5M2, IMP, CAP_HWCAP, KERNEL_HWCAP_F8E5M2),
#ifdef CONFIG_ARM64_POE


@ -1694,31 +1694,6 @@ void fpsimd_signal_preserve_current_state(void)
sve_to_fpsimd(current);
}
/*
* Called by KVM when entering the guest.
*/
void fpsimd_kvm_prepare(void)
{
if (!system_supports_sve())
return;
/*
* KVM does not save host SVE state since we can only enter
* the guest from a syscall so the ABI means that only the
* non-saved SVE state needs to be saved. If we have left
* SVE enabled for performance reasons then update the task
* state to be FPSIMD only.
*/
get_cpu_fpsimd_context();
if (test_and_clear_thread_flag(TIF_SVE)) {
sve_to_fpsimd(current);
current->thread.fp_type = FP_STATE_FPSIMD;
}
put_cpu_fpsimd_context();
}
/*
* Associate current's FPSIMD context with this cpu
* The caller must have ownership of the cpu FPSIMD context before calling


@ -194,12 +194,19 @@ static void amu_fie_setup(const struct cpumask *cpus)
int cpu;
/* We are already set since the last insmod of cpufreq driver */
if (unlikely(cpumask_subset(cpus, amu_fie_cpus)))
if (cpumask_available(amu_fie_cpus) &&
unlikely(cpumask_subset(cpus, amu_fie_cpus)))
return;
for_each_cpu(cpu, cpus) {
for_each_cpu(cpu, cpus)
if (!freq_counters_valid(cpu))
return;
if (!cpumask_available(amu_fie_cpus) &&
!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL)) {
WARN_ONCE(1, "Failed to allocate FIE cpumask for CPUs[%*pbl]\n",
cpumask_pr_args(cpus));
return;
}
cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
@ -237,17 +244,8 @@ static struct notifier_block init_amu_fie_notifier = {
static int __init init_amu_fie(void)
{
int ret;
if (!zalloc_cpumask_var(&amu_fie_cpus, GFP_KERNEL))
return -ENOMEM;
ret = cpufreq_register_notifier(&init_amu_fie_notifier,
return cpufreq_register_notifier(&init_amu_fie_notifier,
CPUFREQ_POLICY_NOTIFIER);
if (ret)
free_cpumask_var(amu_fie_cpus);
return ret;
}
core_initcall(init_amu_fie);


@ -41,6 +41,7 @@ SECTIONS
*/
/DISCARD/ : {
*(.note.GNU-stack .note.gnu.property)
*(.ARM.attributes)
}
.note : { *(.note.*) } :text :note


@ -162,6 +162,7 @@ SECTIONS
/DISCARD/ : {
*(.interp .dynamic)
*(.dynsym .dynstr .hash .gnu.hash)
*(.ARM.attributes)
}
. = KIMAGE_VADDR;


@ -447,21 +447,19 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
struct arch_timer_context *timer_ctx)
{
int ret;
kvm_timer_update_status(timer_ctx, new_level);
timer_ctx->irq.level = new_level;
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
timer_ctx->irq.level);
if (!userspace_irqchip(vcpu->kvm)) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
WARN_ON(ret);
}
if (userspace_irqchip(vcpu->kvm))
return;
kvm_vgic_inject_irq(vcpu->kvm, vcpu,
timer_irq(timer_ctx),
timer_ctx->irq.level,
timer_ctx);
}
/* Only called for a fully emulated timer */


@ -2481,14 +2481,6 @@ static void finalize_init_hyp_mode(void)
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
kern_hyp_va(sve_state);
}
} else {
for_each_possible_cpu(cpu) {
struct user_fpsimd_state *fpsimd_state;
fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
kern_hyp_va(fpsimd_state);
}
}
}


@ -54,50 +54,18 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (!system_supports_fpsimd())
return;
fpsimd_kvm_prepare();
/*
* We will check TIF_FOREIGN_FPSTATE just before entering the
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
* FP_STATE_FREE if the flag set.
* Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
* that the host kernel is responsible for restoring this state upon
* return to userspace, and the hyp code doesn't need to save anything.
*
* When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
* that PSTATE.{SM,ZA} == {0,0}.
*/
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
fpsimd_save_and_flush_cpu_state();
*host_data_ptr(fp_owner) = FP_STATE_FREE;
host_data_clear_flag(HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
host_data_set_flag(HOST_SVE_ENABLED);
if (system_supports_sme()) {
host_data_clear_flag(HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
host_data_set_flag(HOST_SME_ENABLED);
/*
* If PSTATE.SM is enabled then save any pending FP
* state and disable PSTATE.SM. If we leave PSTATE.SM
* enabled and the guest does not enable SME via
* CPACR_EL1.SMEN then operations that should be valid
* may generate SME traps from EL1 to EL1 which we
* can't intercept and which would confuse the guest.
*
* Do the same for PSTATE.ZA in the case where there
* is state in the registers which has not already
* been saved, this is very unlikely to happen.
*/
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
*host_data_ptr(fp_owner) = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
/*
* If normal guests gain SME support, maintain this behavior for pKVM
* guests, which don't support SME.
*/
WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
read_sysreg_s(SYS_SVCR));
WARN_ON_ONCE(system_supports_sme() && read_sysreg_s(SYS_SVCR));
}
/*
@ -162,52 +130,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
/*
* If we have VHE then the Hyp code will reset CPACR_EL1 to
* the default value and we need to reenable SME.
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (host_data_test_flag(HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_SMEN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,
CPACR_EL1_SMEN_EL1EN);
isb();
}
if (guest_owns_fp_regs()) {
if (vcpu_has_sve(vcpu)) {
u64 zcr = read_sysreg_el1(SYS_ZCR);
/*
* If the vCPU is in the hyp context then ZCR_EL1 is
* loaded with its vEL2 counterpart.
*/
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;
/*
* Restore the VL that was saved when bound to the CPU,
* which is the maximum VL for the guest. Because the
* layout of the data when saving the sve state depends
* on the VL, we need to use a consistent (i.e., the
* maximum) VL.
* Note that this means that at guest exit ZCR_EL1 is
* not necessarily the same as on guest entry.
*
* ZCR_EL2 holds the guest hypervisor's VL when running
* a nested guest, which could be smaller than the
* max for the vCPU. Similar to above, we first need to
* switch to a VL consistent with the layout of the
* vCPU's SVE state. KVM support for NV implies VHE, so
* using the ZCR_EL1 alias is safe.
*/
if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
SYS_ZCR_EL1);
}
/*
* Flush (save and invalidate) the fpsimd/sve state so that if
* the host tries to use fpsimd/sve, it's not using stale data
@ -219,18 +142,6 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
* when needed.
*/
fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && system_supports_sve()) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
* reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
if (host_data_test_flag(HOST_SVE_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
else
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
local_irq_restore(flags);


@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
alternative_else_nop_endif
mrs x1, isr_el1
cbz x1, 1f
// Ensure that __guest_enter() always provides a context
// synchronization event so that callers don't need ISBs for anything
// that would usually be synchronized by the ERET.
isb
mov x0, #ARM_EXCEPTION_IRQ
ret


@ -326,7 +326,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
@ -375,7 +375,87 @@ static inline void __hyp_sve_save_host(void)
true);
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
if (vcpu_has_sve(vcpu)) {
/* A guest hypervisor may restrict the effective max VL. */
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
zcr_el2 = __vcpu_sys_reg(vcpu, ZCR_EL2);
else
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = __vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu));
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (!guest_owns_fp_regs())
return;
/*
* When the guest owns the FP regs, we know that guest+hyp traps for
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
* prior to __guest_entry(). As __guest_entry() guarantees a context
* synchronization event, we don't need an ISB here to avoid taking
* traps for anything that was exposed to the guest.
*/
if (vcpu_has_sve(vcpu)) {
zcr_el1 = read_sysreg_el1(SYS_ZCR);
__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr_el1;
/*
* The guest's state is always saved using the guest's max VL.
* Ensure that the host has the guest's max VL active such that
* the host can save the guest's state lazily, but don't
* artificially restrict the host to the guest's max VL.
*/
if (has_vhe()) {
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
} else {
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state as not to reveal that
* fpsimd was used by a guest nor leak upper sve bits.
*/
if (system_supports_sve()) {
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
if (!vcpu_has_sve(vcpu))
cpacr_clear_set(CPACR_EL1_ZEN, 0);
} else {
__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
}
/*
* We trap the first access to the FP/SIMD to save the host context and
@ -383,7 +463,7 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
* If FP/SIMD is not implemented, handle the trap and inject an undefined
* instruction exception to the guest. Similarly for trapped SVE accesses.
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
@ -425,7 +505,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
if (host_owns_fp_regs())
if (is_protected_kvm_enabled() && host_owns_fp_regs())
kvm_hyp_save_fpsimd_host(vcpu);
/* Restore the guest state */
@ -501,9 +581,22 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
/* Open-coded version of timer_get_offset() to allow for kern_hyp_va() */
static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
{
u64 offset = 0;
if (ctxt->offset.vm_offset)
offset += *kern_hyp_va(ctxt->offset.vm_offset);
if (ctxt->offset.vcpu_offset)
offset += *kern_hyp_va(ctxt->offset.vcpu_offset);
return offset;
}
static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
{
return arch_timer_read_cntpct_el0() - timer_get_offset(ctxt);
return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
}
static bool kvm_handle_cntxct(struct kvm_vcpu *vcpu)
@ -587,7 +680,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
return true;
}
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
@ -607,7 +700,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
@ -616,19 +709,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
@ -658,23 +750,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
* Returns true if the hypervisor handled the exit, and control should go back
* to the guest, or false if it hasn't.
*/
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
exit_handler_fn fn;
fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
if (fn)
return fn(vcpu, exit_code);
@ -704,20 +789,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
/*
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
* another.
*/
early_exit_filter(vcpu, exit_code);
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@ -747,7 +821,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
goto exit;
/* Check if there's an exit handler and allow it to handle the exit. */
if (kvm_hyp_handle_exit(vcpu, exit_code))
if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
goto guest;
exit:
/* Return to the host kernel and handle the exit */


@ -5,6 +5,7 @@
*/
#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
@ -83,7 +84,7 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
if (has_fpmr)
write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
@ -224,8 +225,12 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
sync_hyp_vcpu(hyp_vcpu);
} else {
struct kvm_vcpu *vcpu = kern_hyp_va(host_vcpu);
/* The host is fully trusted, run its vCPU directly. */
ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
fpsimd_lazy_switch_to_guest(vcpu);
ret = __kvm_vcpu_run(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
}
out:
cpu_reg(host_ctxt, 1) = ret;
@ -675,12 +680,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
cpacr_clear_set(0, CPACR_EL1_ZEN);
isb();
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
SYS_ZCR_EL2);
break;
case ESR_ELx_EC_IABT_LOW:
case ESR_ELx_EC_DABT_LOW:
handle_host_mem_abort(host_ctxt);


@ -943,10 +943,10 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
if (ret)
return ret;
if (level != KVM_PGTABLE_LAST_LEVEL)
return -E2BIG;
if (!kvm_pte_valid(pte))
return -ENOENT;
if (level != KVM_PGTABLE_LAST_LEVEL)
return -E2BIG;
state = guest_get_page_state(pte, ipa);
if (state != PKVM_PAGE_SHARED_BORROWED)
@ -998,44 +998,57 @@ unlock:
return ret;
}
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
if (!IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
return;
host_lock_component();
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
host_unlock_component();
WARN_ON(ret && ret != -ENOENT);
}
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
int ret;
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
guest_unlock_component(vm);
return ret;
}
int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
guest_unlock_component(vm);
host_unlock_component();
return ret;
}
@ -1043,18 +1056,15 @@ int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm)
{
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
guest_unlock_component(vm);
host_unlock_component();
return ret;
}
@ -1063,18 +1073,14 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
u64 ipa = hyp_pfn_to_phys(gfn);
u64 phys;
int ret;
host_lock_component();
if (pkvm_hyp_vm_is_protected(vm))
return -EPERM;
assert_host_shared_guest(vm, ipa);
guest_lock_component(vm);
ret = __check_host_shared_guest(vm, &phys, ipa);
if (!ret)
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
guest_unlock_component(vm);
host_unlock_component();
return ret;
return 0;
}


@ -39,6 +39,9 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);
if (has_hvhe()) {
val |= CPACR_EL1_TTA;
@ -47,6 +50,8 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
}
write_sysreg(val, cpacr_el1);
} else {
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
@ -61,12 +66,32 @@ static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
if (!guest_owns_fp_regs())
val |= CPTR_EL2_TFP;
write_sysreg(val, cptr_el2);
}
}
if (!guest_owns_fp_regs())
__activate_traps_fpsimd32(vcpu);
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_hvhe()) {
u64 val = CPACR_EL1_FPEN;
kvm_write_cptr_el2(val);
if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_EL1_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN;
write_sysreg(val, cpacr_el1);
} else {
u64 val = CPTR_NVHE_EL2_RES1;
if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
}
}
static void __activate_traps(struct kvm_vcpu *vcpu)
@ -119,7 +144,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
kvm_reset_cptr_el2(vcpu);
__deactivate_cptr_traps(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
@ -192,34 +217,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state as not to reveal that
* fpsimd was used by a guest nor leak upper sve bits.
*/
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
__hyp_sve_save_host();
/* Re-enable SVE traps if not supported for the guest vcpu. */
if (!vcpu_has_sve(vcpu))
cpacr_clear_set(CPACR_EL1_ZEN, 0);
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
u64 val = read_sysreg_s(SYS_FPMR);
if (unlikely(is_protected_kvm_enabled()))
*host_data_ptr(fpmr) = val;
else
**host_data_ptr(fpmr_ptr) = val;
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@ -251,19 +248,21 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
/*
* Some guests (e.g., protected VMs) are not be allowed to run in AArch32.
* The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
* guest from dropping to AArch32 EL0 if implemented by the CPU. If the
* hypervisor spots a guest in such a state ensure it is handled, and don't
* trust the host to spot or fix it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if the guest run loop can continue.
*/
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Some guests (e.g., protected VMs) are not allowed to run in
* AArch32. The ARMv8 architecture does not give the hypervisor a
* mechanism to prevent a guest from dropping to AArch32 EL0 if
* implemented by the CPU. If the hypervisor spots a guest in such a
* state, ensure it is handled, and don't trust the host to spot or fix
* it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*/
if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
/*
* As we have caught the guest red-handed, decide that it isn't
@ -276,6 +275,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
return __fixup_guest_exit(vcpu, exit_code, handlers);
}
/* Switch to the guest for legacy non-VHE systems */

View file

@ -136,6 +136,16 @@ write:
write_sysreg(val, cpacr_el1);
}
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
write_sysreg(val, cpacr_el1);
}
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@ -207,7 +217,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
kvm_reset_cptr_el2(vcpu);
__deactivate_cptr_traps(vcpu);
if (!arm64_kernel_unmapped_at_el0())
host_vectors = __this_cpu_read(this_cpu_vector);
@ -413,14 +423,6 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return true;
}
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
if (kvm_has_fpmr(vcpu->kvm))
**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
int ret = -EINVAL;
@ -538,13 +540,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
return hyp_exit_handlers;
}
synchronize_vcpu_pstate(vcpu, exit_code);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* If we were in HYP context on entry, adjust the PSTATE view
* so that the usual helpers work correctly.
@ -564,6 +563,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
*vcpu_cpsr(vcpu) |= mode;
}
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}
/* Switch to the guest for VHE systems running in EL2 */
@ -578,6 +579,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
fpsimd_lazy_switch_to_guest(vcpu);
/*
* Note that ARM erratum 1165522 requires us to configure both stage 1
* and stage 2 translation for the guest context before we clear
@ -602,6 +605,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__deactivate_traps(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
if (guest_owns_fp_regs())

View file

@ -34,9 +34,9 @@
*
* CPU Interface:
*
* - kvm_vgic_vcpu_init(): initialization of static data that
* doesn't depend on any sizing information or emulation type. No
* allocation is allowed there.
* - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
* on any sizing information. Private interrupts are allocated if not
* already allocated at vgic-creation time.
*/
/* EARLY INIT */
@ -58,6 +58,8 @@ void kvm_vgic_early_init(struct kvm *kvm)
/* CREATION */
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
/**
* kvm_vgic_create: triggered by the instantiation of the VGIC device by
* user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
@ -112,6 +114,22 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
goto out_unlock;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
ret = vgic_allocate_private_irqs_locked(vcpu, type);
if (ret)
break;
}
if (ret) {
kvm_for_each_vcpu(i, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
kfree(vgic_cpu->private_irqs);
vgic_cpu->private_irqs = NULL;
}
goto out_unlock;
}
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
@ -180,7 +198,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
return 0;
}
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int i;
@ -218,17 +236,28 @@ static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu)
/* PPIs */
irq->config = VGIC_CONFIG_LEVEL;
}
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
break;
case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->group = 0;
irq->targets = BIT(vcpu->vcpu_id);
break;
}
}
return 0;
}
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu)
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
{
int ret;
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = vgic_allocate_private_irqs_locked(vcpu);
ret = vgic_allocate_private_irqs_locked(vcpu, type);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return ret;
@ -258,7 +287,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
return 0;
ret = vgic_allocate_private_irqs(vcpu);
ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
if (ret)
return ret;
@ -295,7 +324,7 @@ int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0, i;
int ret = 0;
unsigned long idx;
lockdep_assert_held(&kvm->arch.config_lock);
@ -315,35 +344,6 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
/* Initialize groups on CPUs created before the VGIC type was known */
kvm_for_each_vcpu(idx, vcpu, kvm) {
ret = vgic_allocate_private_irqs_locked(vcpu);
if (ret)
goto out;
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
irq->group = 1;
irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
break;
case KVM_DEV_TYPE_ARM_VGIC_V2:
irq->group = 0;
irq->targets = 1U << idx;
break;
default:
ret = -EINVAL;
}
vgic_put_irq(kvm, irq);
if (ret)
goto out;
}
}
/*
* If we have GICv4.1 enabled, unconditionally request enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only

View file

@ -162,6 +162,13 @@ static int copy_p4d(struct trans_pgd_info *info, pgd_t *dst_pgdp,
unsigned long next;
unsigned long addr = start;
if (pgd_none(READ_ONCE(*dst_pgdp))) {
dst_p4dp = trans_alloc(info);
if (!dst_p4dp)
return -ENOMEM;
pgd_populate(NULL, dst_pgdp, dst_p4dp);
}
dst_p4dp = p4d_offset(dst_pgdp, start);
src_p4dp = p4d_offset(src_pgdp, start);
do {

View file

@ -76,27 +76,6 @@ extern const char *__cpu_full_name[];
#define cpu_family_string() __cpu_family[raw_smp_processor_id()]
#define cpu_full_name_string() __cpu_full_name[raw_smp_processor_id()]
struct seq_file;
struct notifier_block;
extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
#define proc_cpuinfo_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
.notifier_call = fn, \
.priority = pri \
}; \
\
register_proc_cpuinfo_notifier(&fn##_nb); \
})
struct proc_cpuinfo_notifier_args {
struct seq_file *m;
unsigned long n;
};
static inline bool cpus_are_siblings(int cpua, int cpub)
{
struct cpuinfo_loongarch *infoa = &cpu_data[cpua];

View file

@ -77,6 +77,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
struct seq_file;
struct secondary_data {
unsigned long stack;
unsigned long thread_info;

View file

@ -18,16 +18,19 @@
.align 5
SYM_FUNC_START(__arch_cpu_idle)
/* start of rollback region */
LONG_L t0, tp, TI_FLAGS
nop
andi t0, t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
/* start of idle interrupt region */
ori t0, zero, CSR_CRMD_IE
/* idle instruction needs irq enabled */
csrxchg t0, t0, LOONGARCH_CSR_CRMD
/*
* If an interrupt lands here, between enabling interrupts above and
* going idle on the next instruction, we must *NOT* go idle since the
* interrupt could have set TIF_NEED_RESCHED or caused a timer to need
* reprogramming. Fall through -- see handle_vint() below -- and have
* the idle loop take care of things.
*/
idle 0
/* end of rollback region */
/* end of idle interrupt region */
1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
@ -35,11 +38,10 @@ SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
la_abs t1, 1b
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
xori t0, t0, 0x1f
/* 3 instructions idle interrupt region */
ori t0, t0, 0b1100
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp
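
In C terms, the ERA fixup done by handle_vint amounts to the following minimal sketch (idle_region_start/idle_region_end are hypothetical markers for the 3-instruction idle interrupt region; the asm above derives the same bounds by aligning ERA with the ori/bne pair):

/*
 * Sketch only, not kernel code: if the saved ERA points into the idle
 * interrupt region, advance it past the idle instruction (label 1: jr ra)
 * so the interrupted __arch_cpu_idle returns instead of going to sleep
 * with work pending.
 */
static void fixup_idle_era(struct pt_regs *regs,
			   unsigned long idle_region_start,
			   unsigned long idle_region_end)
{
	if (regs->csr_era >= idle_region_start &&
	    regs->csr_era < idle_region_end)
		regs->csr_era = idle_region_end;
}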

View file

@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */
__arch_cpu_idle();
raw_local_irq_disable();
}

View file

@ -13,28 +13,12 @@
#include <asm/processor.h>
#include <asm/time.h>
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
}
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
@ -91,20 +75,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
seq_printf(m, "Hardware Watchpoint\t: %s",
cpu_has_watch ? "yes, " : "no\n");
seq_printf(m, "Hardware Watchpoint\t: %s", str_yes_no(cpu_has_watch));
if (cpu_has_watch) {
seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
seq_printf(m, ", iwatch count: %d, dwatch count: %d",
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
proc_cpuinfo_notifier_args.m = m;
proc_cpuinfo_notifier_args.n = n;
raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
&proc_cpuinfo_notifier_args);
seq_printf(m, "\n");
seq_printf(m, "\n\n");
return 0;
}

View file

@ -33,7 +33,7 @@ void machine_halt(void)
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -53,7 +53,7 @@ void machine_power_off(void)
#endif
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}
@ -74,6 +74,6 @@ void machine_restart(char *command)
acpi_reboot();
while (true) {
__arch_cpu_idle();
__asm__ __volatile__("idle 0" : : : "memory");
}
}

View file

@ -303,9 +303,9 @@ int kvm_arch_enable_virtualization_cpu(void)
* TOE=0: Trap on Exception.
* TIT=0: Trap on Timer.
*/
if (env & CSR_GCFG_GCIP_ALL)
if (env & CSR_GCFG_GCIP_SECURE)
gcfg |= CSR_GCFG_GCI_SECURE;
if (env & CSR_GCFG_MATC_ROOT)
if (env & CSR_GCFG_MATP_ROOT)
gcfg |= CSR_GCFG_MATC_ROOT;
write_csr_gcfg(gcfg);

View file

@ -85,7 +85,7 @@
* Guest CRMD comes from separate GCSR_CRMD register
*/
ori t0, zero, CSR_PRMD_PIE
csrxchg t0, t0, LOONGARCH_CSR_PRMD
csrwr t0, LOONGARCH_CSR_PRMD
/* Set PVM bit to setup ertn to guest context */
ori t0, zero, CSR_GSTAT_PVM

View file

@ -1548,9 +1548,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Restore timer state regardless */
kvm_restore_timer(vcpu);
/* Control guest page CCA attribute */
change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
/* Restore hardware PMU CSRs */

View file

@ -25,7 +25,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len == 0))
if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;

View file

@ -3,6 +3,7 @@
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <linux/memblock.h>
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
@ -167,7 +168,7 @@ bool kernel_page_present(struct page *page)
unsigned long addr = (unsigned long)page_address(page);
if (addr < vm_map_base)
return true;
return memblock_is_memory(__pa(addr));
pgd = pgd_offset_k(addr);
if (pgd_none(pgdp_get(pgd)))

View file

@ -27,8 +27,8 @@
*/
struct pt_regs {
#ifdef CONFIG_32BIT
/* Pad bytes for argument save space on the stack. */
unsigned long pad0[8];
/* Saved syscall stack arguments; entries 0-3 unused. */
unsigned long args[8];
#endif
/* Saved main processor registers. */

View file

@ -57,37 +57,21 @@ static inline void mips_syscall_update_nr(struct task_struct *task,
static inline void mips_get_syscall_arg(unsigned long *arg,
struct task_struct *task, struct pt_regs *regs, unsigned int n)
{
unsigned long usp __maybe_unused = regs->regs[29];
#ifdef CONFIG_32BIT
switch (n) {
case 0: case 1: case 2: case 3:
*arg = regs->regs[4 + n];
return;
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
get_user(*arg, (int *)usp + n);
*arg = regs->args[n];
return;
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
return;
#endif
default:
BUG();
}
unreachable();
#else
*arg = regs->regs[4 + n];
if ((IS_ENABLED(CONFIG_MIPS32_O32) &&
test_tsk_thread_flag(task, TIF_32BIT_REGS)))
*arg = (unsigned int)*arg;
#endif
}
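
The point of the rework is visible in the deleted branch: the old code fetched stack arguments with get_user() from tracing context. With the entry-side change below, the O32 stack arguments are stored into pt_regs at syscall entry, so mips_get_syscall_arg() never touches user memory. A minimal sketch of that conceptual C equivalent (the helper name is illustrative, not the kernel's):

/* Sketch only: conceptual C equivalent of the scall32-o32.S stores. */
static int copy_o32_stack_args(struct pt_regs *regs,
			       unsigned long __user *usp)
{
	int i;

	for (i = 4; i < 8; i++)
		if (get_user(regs->args[i], usp + i))
			return -EFAULT;	/* user stack may fault */
	return 0;
}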
static inline long syscall_get_error(struct task_struct *task,

View file

@ -27,6 +27,12 @@ void output_ptreg_defines(void);
void output_ptreg_defines(void)
{
COMMENT("MIPS pt_regs offsets.");
#ifdef CONFIG_32BIT
OFFSET(PT_ARG4, pt_regs, args[4]);
OFFSET(PT_ARG5, pt_regs, args[5]);
OFFSET(PT_ARG6, pt_regs, args[6]);
OFFSET(PT_ARG7, pt_regs, args[7]);
#endif
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);

View file

@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
loads_done:
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
sw t7, 24(sp) # argument #7 to ksp
sw t8, 28(sp) # argument #8 to ksp
sw t5, PT_ARG4(sp) # argument #5 to ksp
sw t6, PT_ARG5(sp) # argument #6 to ksp
sw t7, PT_ARG6(sp) # argument #7 to ksp
sw t8, PT_ARG7(sp) # argument #8 to ksp
.set pop
.section __ex_table,"a"

View file

@ -77,9 +77,17 @@
/*
* With 4K page size the real_pte machinery is all nops.
*/
#define __real_pte(e, p, o) ((real_pte_t){(e)})
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
return (real_pte_t){pte};
}
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT;
}
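
The move from macros to static inlines changes no behaviour; it gives the previously ignored arguments real uses, which avoids set-but-unused warnings in callers. A hypothetical caller illustrating the difference (lookup_ptep() is an assumed helper, not from this file):

/*
 * Sketch only: with the old macro form, 'ptep' vanished during
 * preprocessing, so a caller like this could trip
 * -Wunused-but-set-variable; the inline __real_pte() consumes it.
 */
static void example_caller(pte_t pte)
{
	pte_t *ptep = lookup_ptep();		/* assumed helper */
	real_pte_t rpte = __real_pte(pte, ptep, 0);

	(void)__rpte_to_hidx(rpte, 0);
}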
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \

View file

@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu)
unsigned long addr;
int err;
area = get_vm_area(PAGE_SIZE, VM_ALLOC);
area = get_vm_area(PAGE_SIZE, 0);
if (!area) {
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
cpu);
@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep
orig_mm = start_using_temp_mm(patching_mm);
kasan_disable_current();
err = __patch_instructions(patch_addr, code, len, repeat_instr);
kasan_enable_current();
/* context synchronisation performed by __patch_instructions */
stop_using_temp_mm(patching_mm, orig_mm);

View file

@ -86,7 +86,7 @@ static int cmma_test_essa(void)
: [reg1] "=&d" (reg1),
[reg2] "=&a" (reg2),
[rc] "+&d" (rc),
[tmp] "=&d" (tmp),
[tmp] "+&d" (tmp),
"+Q" (get_lowcore()->program_new_psw),
"=Q" (old)
: [psw_old] "a" (&old),
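
The constraint change matters because [tmp] is initialised in C before the asm runs: "=&d" declares a write-only operand, which lets the compiler discard that initialisation, while "+&d" declares it read-write so the initial value survives into the asm. A generic illustration under that reading (not the boot code itself):

/*
 * Sketch only: 'tmp' keeps its C-side default when the asm takes the
 * branch that skips writing it; "+&d" makes that safe.
 */
static int read_flag_or_default(unsigned char *addr)
{
	int tmp = 0;

	asm volatile(
		"	tm	0(%[addr]),1\n"
		"	jz	0f\n"
		"	lhi	%[tmp],1\n"
		"0:\n"
		: [tmp] "+&d" (tmp)
		: [addr] "a" (addr)
		: "cc");

	return tmp;
}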

View file

@ -469,6 +469,7 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@ -740,7 +741,6 @@ CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@ -875,6 +875,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_GRAPH_RETADDR=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y

View file

@ -459,6 +459,7 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@ -725,7 +726,6 @@ CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_USER=m
@ -826,6 +826,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FUNCTION_GRAPH_RETADDR=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y

View file

@ -62,7 +62,6 @@ CONFIG_ZFCP=y
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_ZLIB_DFLTCC is not set
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y

View file

@ -53,7 +53,11 @@ static __always_inline bool arch_test_bit(unsigned long nr, const volatile unsig
unsigned long mask;
int cc;
if (__builtin_constant_p(nr)) {
/*
* With CONFIG_PROFILE_ALL_BRANCHES enabled gcc fails to
* handle __builtin_constant_p() in some cases.
*/
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && __builtin_constant_p(nr)) {
addr = (const volatile unsigned char *)ptr;
addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE;
mask = 1UL << (nr & (BITS_PER_BYTE - 1));

View file

@ -331,6 +331,17 @@ error:
return rc;
}
static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
struct pci_dev *pdev;
pdev = zpci_iov_find_parent_pf(zbus, zdev);
if (!pdev)
return true;
pci_dev_put(pdev);
return false;
}
int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
{
bool topo_is_tid = zdev->tid_avail;
@ -345,6 +356,15 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
topo = topo_is_tid ? zdev->tid : zdev->pchid;
zbus = zpci_bus_get(topo, topo_is_tid);
/*
* An isolated VF gets its own domain/bus even if there exists
* a matching domain/bus already
*/
if (zbus && zpci_bus_is_isolated_vf(zbus, zdev)) {
zpci_bus_put(zbus);
zbus = NULL;
}
if (!zbus) {
zbus = zpci_bus_alloc(topo, topo_is_tid);
if (!zbus)

View file

@ -60,18 +60,35 @@ static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, in
return 0;
}
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
/**
* zpci_iov_find_parent_pf - Find the parent PF, if any, of the given function
* @zbus: The bus that the PCI function is on, or would be added on
* @zdev: The PCI function
*
* Finds the parent PF, if it exists and is configured, of the given PCI function
* and increments its refcount. The PF is searched for on the provided bus so the
* caller has to ensure that this is the correct bus to search. This function may
* be used before adding the PCI function to a zbus.
*
* Return: Pointer to the struct pci_dev of the parent PF or NULL if it is not
* found. If the function is not a VF or has no RequesterID information,
* NULL is returned as well.
*/
struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
int i, cand_devfn;
struct zpci_dev *zdev;
int i, vfid, devfn, cand_devfn;
struct pci_dev *pdev;
int vfid = vfn - 1; /* Linux vfids start at 0, vfn at 1 */
int rc = 0;
if (!zbus->multifunction)
return 0;
/* If the parent PF for the given VF is also configured in the
return NULL;
/* Non-VFs and VFs without RID available don't have a parent */
if (!zdev->vfn || !zdev->rid_available)
return NULL;
/* Linux vfid starts at 0, vfn at 1 */
vfid = zdev->vfn - 1;
devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
/*
* If the parent PF for the given VF is also configured in the
* instance, it must be on the same zbus.
* We can then identify the parent PF by checking what
* devfn the VF would have if it belonged to that PF using the PF's
@ -85,15 +102,26 @@ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn
if (!pdev)
continue;
cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
if (cand_devfn == virtfn->devfn) {
rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
/* balance pci_get_slot() */
pci_dev_put(pdev);
break;
}
if (cand_devfn == devfn)
return pdev;
/* balance pci_get_slot() */
pci_dev_put(pdev);
}
}
return NULL;
}
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
{
struct zpci_dev *zdev = to_zpci(virtfn);
struct pci_dev *pdev_pf;
int rc = 0;
pdev_pf = zpci_iov_find_parent_pf(zbus, zdev);
if (pdev_pf) {
/* Linux vfids start at 0 while zdev->vfn starts at 1 */
rc = zpci_iov_link_virtfn(pdev_pf, virtfn, zdev->vfn - 1);
pci_dev_put(pdev_pf);
}
return rc;
}

View file

@ -19,6 +19,8 @@ void zpci_iov_map_resources(struct pci_dev *pdev);
int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev);
#else /* CONFIG_PCI_IOV */
static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
@ -28,5 +30,10 @@ static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *v
{
return 0;
}
static inline struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
return NULL;
}
#endif /* CONFIG_PCI_IOV */
#endif /* __S390_PCI_IOV_h */

View file

@ -8,7 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
$(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
CFLAGS_sha256.o := -D__NO_FORTIFY
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
@ -19,9 +19,11 @@ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
KBUILD_CFLAGS += -fno-stack-protector
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
KBUILD_AFLAGS += -D__DISABLE_EXPORTS
# Since we link purgatory with -r unresolved symbols are not checked, so we
# also link a purgatory.chk binary without -r to check for unresolved symbols.

View file

@ -25,8 +25,10 @@
#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
#define NUM_IRQ_MSGS 10
#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1))
#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1)
struct um_pci_message_buffer {
struct virtio_pcidev_msg hdr;
u8 data[8];
};
struct um_pci_device {
struct virtio_device *vdev;
@ -36,6 +38,11 @@ struct um_pci_device {
struct virtqueue *cmd_vq, *irq_vq;
#define UM_PCI_WRITE_BUFS 20
struct um_pci_message_buffer bufs[UM_PCI_WRITE_BUFS + 1];
void *extra_ptrs[UM_PCI_WRITE_BUFS + 1];
DECLARE_BITMAP(used_bufs, UM_PCI_WRITE_BUFS);
#define UM_PCI_STAT_WAITING 0
unsigned long status;
@ -61,12 +68,40 @@ static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
static unsigned int um_pci_max_delay_us = 40000;
module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644);
struct um_pci_message_buffer {
struct virtio_pcidev_msg hdr;
u8 data[8];
};
static int um_pci_get_buf(struct um_pci_device *dev, bool *posted)
{
int i;
static struct um_pci_message_buffer __percpu *um_pci_msg_bufs;
for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
if (!test_and_set_bit(i, dev->used_bufs))
return i;
}
*posted = false;
return UM_PCI_WRITE_BUFS;
}
static void um_pci_free_buf(struct um_pci_device *dev, void *buf)
{
int i;
if (buf == &dev->bufs[UM_PCI_WRITE_BUFS]) {
kfree(dev->extra_ptrs[UM_PCI_WRITE_BUFS]);
dev->extra_ptrs[UM_PCI_WRITE_BUFS] = NULL;
return;
}
for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
if (buf == &dev->bufs[i]) {
kfree(dev->extra_ptrs[i]);
dev->extra_ptrs[i] = NULL;
WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
return;
}
}
WARN_ON(1);
}
static int um_pci_send_cmd(struct um_pci_device *dev,
struct virtio_pcidev_msg *cmd,
@ -82,7 +117,9 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
};
struct um_pci_message_buffer *buf;
int delay_count = 0;
bool bounce_out;
int ret, len;
int buf_idx;
bool posted;
if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
@ -101,26 +138,28 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
break;
}
buf = get_cpu_var(um_pci_msg_bufs);
if (buf)
memcpy(buf, cmd, cmd_size);
bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
out && out_size <= sizeof(buf->data);
if (posted) {
u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);
buf_idx = um_pci_get_buf(dev, &posted);
buf = &dev->bufs[buf_idx];
memcpy(buf, cmd, cmd_size);
if (ncmd) {
memcpy(ncmd, cmd, cmd_size);
if (extra)
memcpy(ncmd + cmd_size, extra, extra_size);
cmd = (void *)ncmd;
cmd_size += extra_size;
extra = NULL;
extra_size = 0;
} else {
/* try without allocating memory */
posted = false;
cmd = (void *)buf;
if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
GFP_ATOMIC);
if (!dev->extra_ptrs[buf_idx]) {
um_pci_free_buf(dev, buf);
return -ENOMEM;
}
extra = dev->extra_ptrs[buf_idx];
} else if (extra && extra_size <= sizeof(buf) - cmd_size) {
memcpy((u8 *)buf + cmd_size, extra, extra_size);
cmd_size += extra_size;
extra_size = 0;
extra = NULL;
cmd = (void *)buf;
} else {
cmd = (void *)buf;
}
@ -128,39 +167,40 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
sg_init_one(&out_sg, cmd, cmd_size);
if (extra)
sg_init_one(&extra_sg, extra, extra_size);
if (out)
/* allow stack for small buffers */
if (bounce_out)
sg_init_one(&in_sg, buf->data, out_size);
else if (out)
sg_init_one(&in_sg, out, out_size);
/* add to internal virtio queue */
ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
extra ? 2 : 1,
out ? 1 : 0,
posted ? cmd : HANDLE_NO_FREE(cmd),
GFP_ATOMIC);
cmd, GFP_ATOMIC);
if (ret) {
if (posted)
kfree(cmd);
goto out;
um_pci_free_buf(dev, buf);
return ret;
}
if (posted) {
virtqueue_kick(dev->cmd_vq);
ret = 0;
goto out;
return 0;
}
/* kick and poll for getting a response on the queue */
set_bit(UM_PCI_STAT_WAITING, &dev->status);
virtqueue_kick(dev->cmd_vq);
ret = 0;
while (1) {
void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
if (completed == HANDLE_NO_FREE(cmd))
if (completed == buf)
break;
if (completed && !HANDLE_IS_NO_FREE(completed))
kfree(completed);
if (completed)
um_pci_free_buf(dev, completed);
if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
++delay_count > um_pci_max_delay_us,
@ -172,8 +212,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
}
clear_bit(UM_PCI_STAT_WAITING, &dev->status);
out:
put_cpu_var(um_pci_msg_bufs);
if (bounce_out)
memcpy(out, buf->data, out_size);
um_pci_free_buf(dev, buf);
return ret;
}
@ -187,20 +230,13 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
.size = size,
.addr = offset,
};
/* buf->data is maximum size - we may only use parts of it */
struct um_pci_message_buffer *buf;
u8 *data;
unsigned long ret = ULONG_MAX;
size_t bytes = sizeof(buf->data);
/* max 8, we might not use it all */
u8 data[8];
if (!dev)
return ULONG_MAX;
buf = get_cpu_var(um_pci_msg_bufs);
data = buf->data;
if (buf)
memset(data, 0xff, bytes);
memset(data, 0xff, sizeof(data));
switch (size) {
case 1:
@ -212,34 +248,26 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
break;
default:
WARN(1, "invalid config space read size %d\n", size);
goto out;
return ULONG_MAX;
}
if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes))
goto out;
if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
return ULONG_MAX;
switch (size) {
case 1:
ret = data[0];
break;
return data[0];
case 2:
ret = le16_to_cpup((void *)data);
break;
return le16_to_cpup((void *)data);
case 4:
ret = le32_to_cpup((void *)data);
break;
return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
case 8:
ret = le64_to_cpup((void *)data);
break;
return le64_to_cpup((void *)data);
#endif
default:
break;
return ULONG_MAX;
}
out:
put_cpu_var(um_pci_msg_bufs);
return ret;
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
@ -312,13 +340,8 @@ static void um_pci_bar_copy_from(void *priv, void *buffer,
static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
int size)
{
/* buf->data is maximum size - we may only use parts of it */
struct um_pci_message_buffer *buf;
u8 *data;
unsigned long ret = ULONG_MAX;
buf = get_cpu_var(um_pci_msg_bufs);
data = buf->data;
/* 8 is maximum size - we may only use parts of it */
u8 data[8];
switch (size) {
case 1:
@ -330,33 +353,25 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
break;
default:
WARN(1, "invalid config space read size %d\n", size);
goto out;
return ULONG_MAX;
}
um_pci_bar_copy_from(priv, data, offset, size);
switch (size) {
case 1:
ret = data[0];
break;
return data[0];
case 2:
ret = le16_to_cpup((void *)data);
break;
return le16_to_cpup((void *)data);
case 4:
ret = le32_to_cpup((void *)data);
break;
return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
case 8:
ret = le64_to_cpup((void *)data);
break;
return le64_to_cpup((void *)data);
#endif
default:
break;
return ULONG_MAX;
}
out:
put_cpu_var(um_pci_msg_bufs);
return ret;
}
static void um_pci_bar_copy_to(void *priv, unsigned int offset,
@ -523,11 +538,8 @@ static void um_pci_cmd_vq_cb(struct virtqueue *vq)
if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
return;
while ((cmd = virtqueue_get_buf(vq, &len))) {
if (WARN_ON(HANDLE_IS_NO_FREE(cmd)))
continue;
kfree(cmd);
}
while ((cmd = virtqueue_get_buf(vq, &len)))
um_pci_free_buf(dev, cmd);
}
static void um_pci_irq_vq_cb(struct virtqueue *vq)
@ -1006,10 +1018,6 @@ static int __init um_pci_init(void)
"No virtio device ID configured for PCI - no PCI support\n"))
return 0;
um_pci_msg_bufs = alloc_percpu(struct um_pci_message_buffer);
if (!um_pci_msg_bufs)
return -ENOMEM;
bridge = pci_alloc_host_bridge(0);
if (!bridge) {
err = -ENOMEM;
@ -1070,7 +1078,6 @@ free:
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
}
free_percpu(um_pci_msg_bufs);
return err;
}
module_init(um_pci_init);
@ -1082,6 +1089,5 @@ static void __exit um_pci_exit(void)
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
pci_free_host_bridge(bridge);
free_percpu(um_pci_msg_bufs);
}
module_exit(um_pci_exit);

View file

@ -52,7 +52,7 @@ struct virtio_uml_device {
struct platform_device *pdev;
struct virtio_uml_platform_data *pdata;
spinlock_t sock_lock;
raw_spinlock_t sock_lock;
int sock, req_fd, irq;
u64 features;
u64 protocol_features;
@ -246,7 +246,7 @@ static int vhost_user_send(struct virtio_uml_device *vu_dev,
if (request_ack)
msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
spin_lock_irqsave(&vu_dev->sock_lock, flags);
raw_spin_lock_irqsave(&vu_dev->sock_lock, flags);
rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
if (rc < 0)
goto out;
@ -266,7 +266,7 @@ static int vhost_user_send(struct virtio_uml_device *vu_dev,
}
out:
spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
raw_spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
return rc;
}
@ -1239,7 +1239,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
goto error_free;
vu_dev->sock = rc;
spin_lock_init(&vu_dev->sock_lock);
raw_spin_lock_init(&vu_dev->sock_lock);
rc = vhost_user_init(vu_dev);
if (rc)

View file

@ -52,7 +52,7 @@ struct irq_entry {
bool sigio_workaround;
};
static DEFINE_SPINLOCK(irq_lock);
static DEFINE_RAW_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
@ -257,7 +257,7 @@ static struct irq_entry *get_irq_entry_by_fd(int fd)
return NULL;
}
static void free_irq_entry(struct irq_entry *to_free, bool remove)
static void remove_irq_entry(struct irq_entry *to_free, bool remove)
{
if (!to_free)
return;
@ -265,7 +265,6 @@ static void free_irq_entry(struct irq_entry *to_free, bool remove)
if (remove)
os_del_epoll_fd(to_free->fd);
list_del(&to_free->list);
kfree(to_free);
}
static bool update_irq_entry(struct irq_entry *entry)
@ -286,17 +285,19 @@ static bool update_irq_entry(struct irq_entry *entry)
return false;
}
static void update_or_free_irq_entry(struct irq_entry *entry)
static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
{
if (!update_irq_entry(entry))
free_irq_entry(entry, false);
if (update_irq_entry(entry))
return NULL;
remove_irq_entry(entry, false);
return entry;
}
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
void (*timetravel_handler)(int, int, void *,
struct time_travel_event *))
{
struct irq_entry *irq_entry;
struct irq_entry *irq_entry, *to_free = NULL;
int err, events = os_event_mask(type);
unsigned long flags;
@ -304,9 +305,10 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
if (err < 0)
goto out;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
irq_entry = get_irq_entry_by_fd(fd);
if (irq_entry) {
already:
/* cannot register the same FD twice with the same type */
if (WARN_ON(irq_entry->reg[type].events)) {
err = -EALREADY;
@ -316,11 +318,22 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
/* temporarily disable to avoid IRQ-side locking */
os_del_epoll_fd(fd);
} else {
irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
if (!irq_entry) {
err = -ENOMEM;
goto out_unlock;
struct irq_entry *new;
/* don't restore interrupts */
raw_spin_unlock(&irq_lock);
new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
if (!new) {
local_irq_restore(flags);
return -ENOMEM;
}
raw_spin_lock(&irq_lock);
irq_entry = get_irq_entry_by_fd(fd);
if (irq_entry) {
to_free = new;
goto already;
}
irq_entry = new;
irq_entry->fd = fd;
list_add_tail(&irq_entry->list, &active_fds);
maybe_sigio_broken(fd);
@ -339,12 +352,11 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
#endif
WARN_ON(!update_irq_entry(irq_entry));
spin_unlock_irqrestore(&irq_lock, flags);
return 0;
err = 0;
out_unlock:
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
out:
kfree(to_free);
return err;
}
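
The allocation dance in activate_fd() is the standard pattern for raw spinlocks: memory must not be allocated while a raw spinlock is held (on PREEMPT_RT the allocator's own locks may sleep), so the lock is dropped, the entry allocated, the lock retaken and the lookup repeated, and a losing allocation is freed only after unlocking. A condensed generic sketch, with lookup()/insert() as assumed helpers:

/*
 * Sketch only: allocate outside the raw lock, re-check under it, and
 * free the loser after unlock. lookup()/insert() are hypothetical.
 */
static struct irq_entry *get_or_create_entry(int fd)
{
	struct irq_entry *entry, *to_free = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_lock, flags);
	entry = lookup(fd);
	if (!entry) {
		struct irq_entry *new;

		raw_spin_unlock(&irq_lock);	/* IRQs stay disabled */
		new = kzalloc(sizeof(*new), GFP_ATOMIC);
		raw_spin_lock(&irq_lock);

		entry = lookup(fd);		/* re-check after re-locking */
		if (entry)
			to_free = new;		/* lost the race */
		else if (new)
			insert(entry = new);
	}
	raw_spin_unlock_irqrestore(&irq_lock, flags);
	kfree(to_free);				/* never kfree() under the raw lock */
	return entry;
}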
@ -358,19 +370,20 @@ void free_irq_by_fd(int fd)
struct irq_entry *to_free;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
to_free = get_irq_entry_by_fd(fd);
free_irq_entry(to_free, true);
spin_unlock_irqrestore(&irq_lock, flags);
remove_irq_entry(to_free, true);
raw_spin_unlock_irqrestore(&irq_lock, flags);
kfree(to_free);
}
EXPORT_SYMBOL(free_irq_by_fd);
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
struct irq_entry *entry;
struct irq_entry *entry, *to_free = NULL;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type i;
@ -386,12 +399,13 @@ static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
os_del_epoll_fd(entry->fd);
reg->events = 0;
update_or_free_irq_entry(entry);
to_free = update_or_remove_irq_entry(entry);
goto out;
}
}
out:
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
kfree(to_free);
}
void deactivate_fd(int fd, int irqnum)
@ -402,7 +416,7 @@ void deactivate_fd(int fd, int irqnum)
os_del_epoll_fd(fd);
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
entry = get_irq_entry_by_fd(fd);
if (!entry)
goto out;
@ -414,9 +428,10 @@ void deactivate_fd(int fd, int irqnum)
entry->reg[i].events = 0;
}
update_or_free_irq_entry(entry);
entry = update_or_remove_irq_entry(entry);
out:
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
kfree(entry);
ignore_sigio_fd(fd);
}
@ -546,7 +561,7 @@ void um_irqs_suspend(void)
irqs_suspended = true;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
bool clear = true;
@ -579,7 +594,7 @@ void um_irqs_suspend(void)
!__ignore_sigio_fd(entry->fd);
}
}
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
}
void um_irqs_resume(void)
@ -588,7 +603,7 @@ void um_irqs_resume(void)
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
if (entry->suspended) {
int err = os_set_fd_async(entry->fd);
@ -602,7 +617,7 @@ void um_irqs_resume(void)
}
}
}
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
irqs_suspended = false;
send_sigio_to_self();
@ -613,7 +628,7 @@ static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
struct irq_entry *entry;
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
raw_spin_lock_irqsave(&irq_lock, flags);
list_for_each_entry(entry, &active_fds, list) {
enum um_irq_type t;
@ -628,7 +643,7 @@ static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
}
}
unlock:
spin_unlock_irqrestore(&irq_lock, flags);
raw_spin_unlock_irqrestore(&irq_lock, flags);
return 0;
}
#else

View file

@ -191,7 +191,15 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
int arch_dup_task_struct(struct task_struct *dst,
struct task_struct *src)
{
memcpy(dst, src, arch_task_struct_size);
/* init_task is not dynamically sized (missing FPU state) */
if (unlikely(src == &init_task)) {
memcpy(dst, src, sizeof(init_task));
memset((void *)dst + sizeof(init_task), 0,
arch_task_struct_size - sizeof(init_task));
} else {
memcpy(dst, src, arch_task_struct_size);
}
return 0;
}

View file

@ -181,6 +181,10 @@ extern char __syscall_stub_start[];
static int stub_exe_fd;
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif
static int userspace_tramp(void *stack)
{
char *const argv[] = { "uml-userspace", NULL };
@ -202,8 +206,12 @@ static int userspace_tramp(void *stack)
init_data.stub_data_fd = phys_mapping(uml_to_phys(stack), &offset);
init_data.stub_data_offset = MMAP_OFFSET(offset);
/* Set CLOEXEC on all FDs and then unset on all memory related FDs */
close_range(0, ~0U, CLOSE_RANGE_CLOEXEC);
/*
* Avoid leaking unneeded FDs to the stub by setting CLOEXEC on all FDs
* and then unsetting it on all memory-related FDs.
* This is not strictly necessary from a safety perspective.
*/
syscall(__NR_close_range, 0, ~0U, CLOSE_RANGE_CLOEXEC);
fcntl(init_data.stub_data_fd, F_SETFD, 0);
for (iomem = iomem_regions; iomem; iomem = iomem->next)
@ -224,7 +232,9 @@ static int userspace_tramp(void *stack)
if (ret != sizeof(init_data))
exit(4);
execveat(stub_exe_fd, "", argv, NULL, AT_EMPTY_PATH);
/* Raw execveat for compatibility with older libc versions */
syscall(__NR_execveat, stub_exe_fd, (unsigned long)"",
(unsigned long)argv, NULL, AT_EMPTY_PATH);
exit(5);
}

View file

@ -2599,7 +2599,8 @@ config MITIGATION_IBPB_ENTRY
depends on CPU_SUP_AMD && X86_64
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.
Compile the kernel with support for the retbleed=ibpb and
spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
config MITIGATION_IBRS_ENTRY
bool "Enable IBRS on kernel entry"

View file

@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = {
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
INTEL_EVENT_CONSTRAINT(0x20, 0xf),
INTEL_UEVENT_CONSTRAINT(0x012a, 0xf),
INTEL_UEVENT_CONSTRAINT(0x012b, 0xf),
INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),
INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
/*
* Generally event codes < 0x90 are restricted to counters 0-3.
* The 0x2E and 0x3C are exceptions, which have no restriction.
*/
INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc),
INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
INTEL_EVENT_CONSTRAINT(0xce, 0x1),
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
/*
* Generally event codes >= 0x90 are likely to have no restrictions.
* The exceptions are defined above.
*/
INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),
INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf),
EVENT_CONSTRAINT_END
};
@ -4905,20 +4899,22 @@ static inline bool intel_pmu_broken_perf_cap(void)
static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
{
unsigned int sub_bitmaps, eax, ebx, ecx, edx;
unsigned int cntr, fixed_cntr, ecx, edx;
union cpuid35_eax eax;
union cpuid35_ebx ebx;
cpuid(ARCH_PERFMON_EXT_LEAF, &sub_bitmaps, &ebx, &ecx, &edx);
cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
if (ebx & ARCH_PERFMON_EXT_UMASK2)
if (ebx.split.umask2)
pmu->config_mask |= ARCH_PERFMON_EVENTSEL_UMASK2;
if (ebx & ARCH_PERFMON_EXT_EQ)
if (ebx.split.eq)
pmu->config_mask |= ARCH_PERFMON_EVENTSEL_EQ;
if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
if (eax.split.cntr_subleaf) {
cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
&eax, &ebx, &ecx, &edx);
pmu->cntr_mask64 = eax;
pmu->fixed_cntr_mask64 = ebx;
&cntr, &fixed_cntr, &ecx, &edx);
pmu->cntr_mask64 = cntr;
pmu->fixed_cntr_mask64 = fixed_cntr;
}
if (!intel_pmu_broken_perf_cap()) {
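
The union view replaces open-coded mask tests with named bitfields. A sketch of the unions as used above, with bit positions assumed from the architectural CPUID leaf 0x23 layout (the authoritative definitions live in the kernel's perf headers):

/* Sketch only: assumed layout of the CPUID.0x23 register views. */
union cpuid35_eax {
	struct {
		unsigned int	leaf0:1;	/* legacy sub-leaf 0 valid */
		unsigned int	cntr_subleaf:1;	/* counters sub-leaf valid */
		unsigned int	acr_subleaf:1;	/* auto counter reload */
		unsigned int	events_subleaf:1;
		unsigned int	reserved:28;
	} split;
	unsigned int	full;
};

union cpuid35_ebx {
	struct {
		unsigned int	umask2:1;	/* UMASK2 supported */
		unsigned int	eq:1;		/* EQ bit supported */
		unsigned int	reserved:30;
	} split;
	unsigned int	full;
};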
@ -4941,11 +4937,6 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
else
pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
if (pmu->intel_cap.pebs_output_pt_available)
pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
else
pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;
intel_pmu_check_event_constraints(pmu->event_constraints,
pmu->cntr_mask64,
pmu->fixed_cntr_mask64,
@ -5023,9 +5014,6 @@ static bool init_hybrid_pmu(int cpu)
pr_info("%s PMU driver: ", pmu->name);
if (pmu->intel_cap.pebs_output_pt_available)
pr_cont("PEBS-via-PT ");
pr_cont("\n");
x86_pmu_show_pmu_cap(&pmu->pmu);
@ -5048,8 +5036,11 @@ static void intel_pmu_cpu_starting(int cpu)
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
* Deal with CPUs that don't clear their LBRs on power-up, and that may
* even boot with LBRs enabled.
*/
if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr)
msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT);
intel_pmu_lbr_reset();
cpuc->lbr_sel = NULL;
@ -6370,11 +6361,9 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
if (pmu->pmu_type & hybrid_small_tiny) {
pmu->intel_cap.perf_metrics = 0;
pmu->intel_cap.pebs_output_pt_available = 1;
pmu->mid_ack = true;
} else if (pmu->pmu_type & hybrid_big) {
pmu->intel_cap.perf_metrics = 1;
pmu->intel_cap.pebs_output_pt_available = 0;
pmu->late_ack = true;
}
}

Some files were not shown because too many files have changed in this diff.