Merge 6.15-rc6 into usb-next

We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit ab6dc9a6c7
559 changed files with 6749 additions and 3012 deletions
.clippy.toml

@@ -7,5 +7,5 @@ check-private-items = true
 disallowed-macros = [
     # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
     # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
-    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
+    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
 ]
4 .mailmap
@@ -447,6 +447,8 @@ Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
+Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com>
+Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
 Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
@@ -483,6 +485,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
 Matthieu CASTET <castet.matthieu@free.fr>
 Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
+Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com>
 Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
 Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
 Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
@@ -749,6 +752,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
 Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
 Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
@@ -111,7 +111,7 @@ Description:	RO. Package current voltage in millivolt.

 What:		/sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input
 Date:		March 2025
-KernelVersion:	6.14
+KernelVersion:	6.15
 Contact:	intel-xe@lists.freedesktop.org
 Description:	RO. Package temperature in millidegree Celsius.
@@ -119,7 +119,7 @@ Description:	RO. Package temperature in millidegree Celsius.

 What:		/sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input
 Date:		March 2025
-KernelVersion:	6.14
+KernelVersion:	6.15
 Contact:	intel-xe@lists.freedesktop.org
 Description:	RO. VRAM temperature in millidegree Celsius.
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek's Keypad Controller

 maintainers:
-  - Mattijs Korpershoek <mkorpershoek@baylibre.com>
+  - Mattijs Korpershoek <mkorpershoek@kernel.org>

 allOf:
   - $ref: /schemas/input/matrix-keymap.yaml#
@@ -74,19 +74,17 @@ properties:
       - rev-rmii
       - moca

-      # RX and TX delays are added by the MAC when required
+      # RX and TX delays are provided by the PCB. See below
       - rgmii

-      # RGMII with internal RX and TX delays provided by the PHY,
-      # the MAC should not add the RX or TX delays in this case
+      # RX and TX delays are not provided by the PCB. This is the most
+      # frequent case. See below
      - rgmii-id

-      # RGMII with internal RX delay provided by the PHY, the MAC
-      # should not add an RX delay in this case
+      # TX delay is provided by the PCB. See below
      - rgmii-rxid

-      # RGMII with internal TX delay provided by the PHY, the MAC
-      # should not add an TX delay in this case
+      # RX delay is provided by the PCB. See below
      - rgmii-txid
      - rtbi
      - smii
@@ -286,4 +284,89 @@ allOf:

 additionalProperties: true

+# Informative
+# ===========
+#
+# 'phy-modes' & 'phy-connection-type' properties 'rgmii', 'rgmii-id',
+# 'rgmii-rxid', and 'rgmii-txid' are frequently used wrongly by
+# developers. This informative section clarifies their usage.
+#
+# The RGMII specification requires a 2ns delay between the data and
+# clock signals on the RGMII bus. How this delay is implemented is not
+# specified.
+#
+# One option is to make the clock traces on the PCB longer than the
+# data traces. A sufficient difference in length can provide the 2ns
+# delay. If both the RX and TX delays are implemented in this manner,
+# 'rgmii' should be used, so indicating the PCB adds the delays.
+#
+# If the PCB does not add these delays via extra long traces,
+# 'rgmii-id' should be used. Here, 'id' refers to 'internal delay',
+# where either the MAC or PHY adds the delay.
+#
+# If only one of the two delays is implemented via extra long clock
+# lines, either 'rgmii-rxid' or 'rgmii-txid' should be used,
+# indicating the MAC or PHY should implement one of the delays
+# internally, while the PCB implements the other delay.
+#
+# Device Tree describes hardware, and in this case, it describes the
+# PCB between the MAC and the PHY, and whether the PCB implements
+# delays or not.
+#
+# In practice, very few PCBs make use of extra long clock lines. Hence
+# any RGMII phy mode other than 'rgmii-id' is probably wrong, and is
+# unlikely to be accepted during review without details provided in
+# the commit description and comments in the .dts file.
+#
+# When the PCB does not implement the delays, the MAC or PHY must. As
+# such, this is software configuration, and so is not described in
+# Device Tree.
+#
+# The following describes how Linux implements the configuration of
+# the MAC and PHY to add these delays when the PCB does not. As stated
+# above, developers often get this wrong, and the aim of this section
+# is to reduce the frequency of these errors by Linux developers. Other
+# users of the Device Tree may implement it differently, and still be
+# consistent with both the normative and informative description
+# above.
+#
+# By default in Linux, when using phylib/phylink, the MAC is expected
+# to read the 'phy-mode' from Device Tree, not implement any delays,
+# and pass the value to the PHY. The PHY will then implement delays as
+# specified by the 'phy-mode'. The PHY should always be reconfigured
+# to implement the needed delays, replacing any setting performed by
+# strapping or the bootloader, etc.
+#
+# Experience to date is that all PHYs which implement RGMII also
+# implement the ability to add or not add the needed delays. Hence
+# this default is expected to work in all cases. Ignoring this default
+# is likely to be questioned by reviewers, and requires a strong
+# argument to be accepted.
+#
+# There are a small number of cases where the MAC has hard-coded
+# delays which cannot be disabled. The 'phy-mode' only describes the
+# PCB. The inability to disable the delays in the MAC does not change
+# the meaning of 'phy-mode'. It does however mean that a 'phy-mode' of
+# 'rgmii' is now invalid: it cannot be supported, since both the PCB
+# and the MAC and PHY adding delays cannot result in a functional
+# link. Thus the MAC should report a fatal error for any modes which
+# cannot be supported. When the MAC implements the delay, it must
+# ensure that the PHY does not also implement the same delay. So it
+# must modify the phy-mode it passes to the PHY, removing the delay it
+# has added. Failure to remove the delay will result in a
+# non-functioning link.
+#
+# Sometimes there is a need to fine tune the delays. Often the MAC or
+# PHY can perform this fine tuning. In the MAC node, the Device Tree
+# properties 'rx-internal-delay-ps' and 'tx-internal-delay-ps' should
+# be used to indicate fine tuning performed by the MAC. The values
+# expected here are small. A value of 2000ps, i.e. 2ns, and a phy-mode
+# of 'rgmii' will not be accepted by reviewers.
+#
+# If the PHY is to perform fine tuning, the properties
+# 'rx-internal-delay-ps' and 'tx-internal-delay-ps' in the PHY node
+# should be used. When the PHY is implementing delays, e.g. 'rgmii-id',
+# these properties should have a value near to 2000ps. If the PCB is
+# implementing delays, e.g. 'rgmii', a small value can be used to fine
+# tune the delay added by the PCB.
 ...
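As a sketch of the MAC-side fix-up described in the informative text above — assuming a hypothetical driver whose MAC has a hard-wired TX delay (the function name and structure are illustrative, not from this commit; the PHY_INTERFACE_MODE_* values are the real phylib constants) — the mode passed on to the PHY could be adjusted like this:

    #include <linux/errno.h>
    #include <linux/phy.h>

    /* Hypothetical MAC with a fixed, non-disableable TX delay. */
    static int example_mac_fixup_phy_mode(phy_interface_t mode,
                                          phy_interface_t *phy_mode)
    {
            switch (mode) {
            case PHY_INTERFACE_MODE_RGMII_ID:
                    /* MAC covers TX; ask the PHY for the RX delay only. */
                    *phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
                    return 0;
            case PHY_INTERFACE_MODE_RGMII_TXID:
                    /* MAC's fixed TX delay is the internal TX delay; PHY adds none. */
                    *phy_mode = PHY_INTERFACE_MODE_RGMII;
                    return 0;
            case PHY_INTERFACE_MODE_RGMII:
            case PHY_INTERFACE_MODE_RGMII_RXID:
                    /* PCB already provides the TX delay; MAC's extra delay breaks the link. */
                    return -EINVAL;
            default:
                    *phy_mode = mode;
                    return 0;
            }
    }

This mirrors the rule above: the MAC must report a fatal error for modes it cannot support, and must strip the delay it adds from the phy-mode handed to the PHY.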
@@ -56,19 +56,18 @@ properties:
       enum:
         - snps,dw-apb-ssi
         - snps,dwc-ssi-1.01a
-      - description: Microsemi Ocelot/Jaguar2 SoC SPI Controller
-        items:
-          - enum:
-              - mscc,ocelot-spi
-              - mscc,jaguar2-spi
-          - const: snps,dw-apb-ssi
       - description: Microchip Sparx5 SoC SPI Controller
         const: microchip,sparx5-spi
       - description: Amazon Alpine SPI Controller
         const: amazon,alpine-dw-apb-ssi
-      - description: Renesas RZ/N1 SPI Controller
+      - description: Vendor controllers which use snps,dw-apb-ssi as fallback
         items:
-          - const: renesas,rzn1-spi
+          - enum:
+              - mscc,ocelot-spi
+              - mscc,jaguar2-spi
+              - renesas,rzn1-spi
+              - sophgo,sg2042-spi
+              - thead,th1520-spi
           - const: snps,dw-apb-ssi
       - description: Intel Keem Bay SPI Controller
         const: intel,keembay-ssi
@@ -88,10 +87,6 @@ properties:
           - renesas,r9a06g032-spi # RZ/N1D
           - renesas,r9a06g033-spi # RZ/N1S
       - const: renesas,rzn1-spi # RZ/N1
-      - description: T-HEAD TH1520 SoC SPI Controller
-        items:
-          - const: thead,th1520-spi
-          - const: snps,dw-apb-ssi

   reg:
     minItems: 1
@@ -89,8 +89,10 @@ definitions:
     doc: Group of short_detected states
   -
     name: phy-upstream-type
-    enum-name:
+    enum-name: phy-upstream
+    header: linux/ethtool.h
+    type: enum
     name-prefix: phy-upstream
     entries: [ mac, phy ]
   -
     name: tcp-data-split
77 MAINTAINERS
@@ -2519,6 +2519,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F:	arch/arm/boot/dts/nxp/imx/
 F:	arch/arm/boot/dts/nxp/mxs/
 F:	arch/arm64/boot/dts/freescale/
+X:	Documentation/devicetree/bindings/media/i2c/
 X:	arch/arm64/boot/dts/freescale/fsl-*
 X:	arch/arm64/boot/dts/freescale/qoriq-*
 X:	drivers/media/i2c/
@@ -8726,6 +8727,7 @@ M:	Chao Yu <chao@kernel.org>
 R:	Yue Hu <zbestahu@gmail.com>
 R:	Jeffle Xu <jefflexu@linux.alibaba.com>
 R:	Sandeep Dhavale <dhavale@google.com>
+R:	Hongbo Li <lihongbo22@huawei.com>
 L:	linux-erofs@lists.ozlabs.org
 S:	Maintained
 W:	https://erofs.docs.kernel.org
@@ -11236,7 +11238,6 @@ S:	Maintained
 F:	drivers/i2c/busses/i2c-cht-wc.c

 I2C/SMBUS ISMT DRIVER
-M:	Seth Heasley <seth.heasley@intel.com>
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-i2c@vger.kernel.org
 F:	Documentation/i2c/busses/i2c-ismt.rst
@@ -15072,7 +15073,7 @@ F:	Documentation/devicetree/bindings/media/mediatek-jpeg-*.yaml
 F:	drivers/media/platform/mediatek/jpeg/

 MEDIATEK KEYPAD DRIVER
-M:	Mattijs Korpershoek <mkorpershoek@baylibre.com>
+M:	Mattijs Korpershoek <mkorpershoek@kernel.org>
 S:	Supported
 F:	Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
 F:	drivers/input/keyboard/mt6779-keypad.c
@@ -15495,24 +15496,45 @@ F:	Documentation/mm/
 F:	include/linux/gfp.h
 F:	include/linux/gfp_types.h
 F:	include/linux/memfd.h
 F:	include/linux/memory.h
 F:	include/linux/memory_hotplug.h
 F:	include/linux/memory-tiers.h
 F:	include/linux/mempolicy.h
 F:	include/linux/mempool.h
 F:	include/linux/memremap.h
 F:	include/linux/mm.h
 F:	include/linux/mm_*.h
 F:	include/linux/mmzone.h
 F:	include/linux/mmu_notifier.h
 F:	include/linux/pagewalk.h
 F:	include/linux/rmap.h
 F:	include/trace/events/ksm.h
 F:	mm/
 F:	tools/mm/
 F:	tools/testing/selftests/mm/
 N:	include/linux/page[-_]*

+MEMORY MANAGEMENT - CORE
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Mike Rapoport <rppt@kernel.org>
+R:	Suren Baghdasaryan <surenb@google.com>
+R:	Michal Hocko <mhocko@suse.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	include/linux/memory.h
+F:	include/linux/mm.h
+F:	include/linux/mm_*.h
+F:	include/linux/mmdebug.h
+F:	include/linux/pagewalk.h
+F:	mm/Kconfig
+F:	mm/debug.c
+F:	mm/init-mm.c
+F:	mm/memory.c
+F:	mm/pagewalk.c
+F:	mm/util.c
+
 MEMORY MANAGEMENT - EXECMEM
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>
@@ -15546,6 +15568,19 @@ F:	mm/page_alloc.c
 F:	include/linux/gfp.h
 F:	include/linux/compaction.h

+MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+M:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Rik van Riel <riel@surriel.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Harry Yoo <harry.yoo@oracle.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	include/linux/rmap.h
+F:	mm/rmap.c
+
 MEMORY MANAGEMENT - SECRETMEM
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>
@@ -15554,6 +15589,30 @@ S:	Maintained
 F:	include/linux/secretmem.h
 F:	mm/secretmem.c

+MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+R:	Zi Yan <ziy@nvidia.com>
+R:	Baolin Wang <baolin.wang@linux.alibaba.com>
+R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Nico Pache <npache@redhat.com>
+R:	Ryan Roberts <ryan.roberts@arm.com>
+R:	Dev Jain <dev.jain@arm.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	Documentation/admin-guide/mm/transhuge.rst
+F:	include/linux/huge_mm.h
+F:	include/linux/khugepaged.h
+F:	include/trace/events/huge_memory.h
+F:	mm/huge_memory.c
+F:	mm/khugepaged.c
+F:	tools/testing/selftests/mm/khugepaged.c
+F:	tools/testing/selftests/mm/split_huge_page_test.c
+F:	tools/testing/selftests/mm/transhuge-stress.c
+
 MEMORY MANAGEMENT - USERFAULTFD
 M:	Andrew Morton <akpm@linux-foundation.org>
 R:	Peter Xu <peterx@redhat.com>
@@ -22739,9 +22798,15 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 F:	Documentation/devicetree/bindings/sound/
 F:	Documentation/sound/soc/
 F:	include/dt-bindings/sound/
+F:	include/sound/cs-amp-lib.h
+F:	include/sound/cs35l*
+F:	include/sound/cs4271.h
+F:	include/sound/cs42l*
+F:	include/sound/madera-pdata.h
 F:	include/sound/soc*
 F:	include/sound/sof.h
 F:	include/sound/sof/
+F:	include/sound/wm*.h
 F:	include/trace/events/sof*.h
 F:	include/uapi/sound/asoc.h
 F:	sound/soc/
9 Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -1052,13 +1052,6 @@ NOSTDINC_FLAGS += -nostdinc
 # perform bounds checking.
 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)

-#Currently, disable -Wstringop-overflow for GCC 11, globally.
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
-KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
-
-#Currently, disable -Wunterminated-string-initialization as broken
-KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
-
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += -fno-strict-overflow
@@ -40,6 +40,9 @@
 			reg = <1>;
 			interrupt-parent = <&gpio4>;
 			interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET_REF>;
+			clock-names = "rmii-ref";
 			status = "okay";
 		};
 	};
@@ -44,7 +44,7 @@
 			next-level-cache = <&l2_0>;
 			clocks = <&scmi_dvfs 0>;

-			l2_0: l2-cache-0 {
+			l2_0: l2-cache {
 				compatible = "cache";
 				cache-level = <2>;
 				/* 8 ways set associative */
@@ -53,13 +53,6 @@
 				cache-sets = <2048>;
 				cache-unified;
 				next-level-cache = <&l3_0>;
-
-				l3_0: l3-cache {
-					compatible = "cache";
-					cache-level = <3>;
-					cache-size = <0x100000>;
-					cache-unified;
-				};
 			};
 		};
@@ -78,7 +71,7 @@
 			next-level-cache = <&l2_1>;
 			clocks = <&scmi_dvfs 0>;

-			l2_1: l2-cache-1 {
+			l2_1: l2-cache {
 				compatible = "cache";
 				cache-level = <2>;
 				/* 8 ways set associative */
@@ -105,7 +98,7 @@
 			next-level-cache = <&l2_2>;
 			clocks = <&scmi_dvfs 1>;

-			l2_2: l2-cache-2 {
+			l2_2: l2-cache {
 				compatible = "cache";
 				cache-level = <2>;
 				/* 8 ways set associative */
@@ -132,7 +125,7 @@
 			next-level-cache = <&l2_3>;
 			clocks = <&scmi_dvfs 1>;

-			l2_3: l2-cache-3 {
+			l2_3: l2-cache {
 				compatible = "cache";
 				cache-level = <2>;
 				/* 8 ways set associative */
@@ -143,6 +136,13 @@
 				next-level-cache = <&l3_0>;
 			};
 		};

+		l3_0: l3-cache {
+			compatible = "cache";
+			cache-level = <3>;
+			cache-size = <0x100000>;
+			cache-unified;
+		};
 	};

 	firmware {
@@ -144,6 +144,19 @@
 		startup-delay-us = <20000>;
 	};

+	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+		compatible = "regulator-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usdhc2_vsel>;
+		gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+		regulator-max-microvolt = <3300000>;
+		regulator-min-microvolt = <1800000>;
+		states = <1800000 0x1>,
+			 <3300000 0x0>;
+		regulator-name = "PMIC_USDHC_VSELECT";
+		vin-supply = <&reg_nvcc_sd>;
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -269,7 +282,7 @@
 			  "SODIMM_19",
 			  "",
 			  "",
-			  "",
+			  "PMIC_USDHC_VSELECT",
 			  "",
 			  "",
 			  "",
@@ -785,6 +798,7 @@
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
 	pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
 	vmmc-supply = <&reg_usdhc2_vmmc>;
+	vqmmc-supply = <&reg_usdhc2_vqmmc>;
 };

 &wdog1 {
@@ -1206,13 +1220,17 @@
 			<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5		0x6>; /* SODIMM 76 */
 	};

+	pinctrl_usdhc2_vsel: usdhc2vselgrp {
+		fsl,pins =
+			<MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4		0x10>; /* PMIC_USDHC_VSELECT */
+	};
+
 	/*
 	 * Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
 	 * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
 	 */
 	pinctrl_usdhc2: usdhc2grp {
 		fsl,pins =
-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT	0x10>,
 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x90>, /* SODIMM 78 */
 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x90>, /* SODIMM 74 */
 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0	0x90>, /* SODIMM 80 */
@@ -1223,7 +1241,6 @@

 	pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
 		fsl,pins =
-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT	0x10>,
 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x94>,
 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x94>,
 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0	0x94>,
@@ -1234,7 +1251,6 @@

 	pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
 		fsl,pins =
-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT	0x10>,
 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x96>,
 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x96>,
 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0	0x96>,
@@ -1246,7 +1262,6 @@
 	/* Avoid backfeeding with removed card power */
 	pinctrl_usdhc2_sleep: usdhc2slpgrp {
 		fsl,pins =
-			<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT	0x0>,
 			<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK		0x0>,
 			<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD		0x0>,
 			<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0	0x0>,
@@ -24,6 +24,20 @@
 	fsl,operating-mode = "nominal";
 };

+&gpu2d {
+	assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>;
+	assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
+	assigned-clock-rates = <800000000>;
+};
+
+&gpu3d {
+	assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>,
+			  <&clk IMX8MP_CLK_GPU3D_SHADER_CORE>;
+	assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+				 <&clk IMX8MP_SYS_PLL1_800M>;
+	assigned-clock-rates = <800000000>, <800000000>;
+};
+
 &pgc_hdmimix {
 	assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
 			  <&clk IMX8MP_CLK_HDMI_APB>;
@@ -46,6 +60,18 @@
 	assigned-clock-rates = <600000000>, <300000000>;
 };

+&pgc_mlmix {
+	assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
+			  <&clk IMX8MP_CLK_ML_AXI>,
+			  <&clk IMX8MP_CLK_ML_AHB>;
+	assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+				 <&clk IMX8MP_SYS_PLL1_800M>,
+				 <&clk IMX8MP_SYS_PLL1_800M>;
+	assigned-clock-rates = <800000000>,
+			       <800000000>,
+			       <300000000>;
+};
+
 &media_blk_ctrl {
 	assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
 			  <&clk IMX8MP_CLK_MEDIA_APB>,
@@ -1626,7 +1626,7 @@
 			reg = <0 0x4c300000 0 0x10000>,
 			      <0 0x60100000 0 0xfe00000>,
 			      <0 0x4c360000 0 0x10000>,
-			      <0 0x4c340000 0 0x2000>;
+			      <0 0x4c340000 0 0x4000>;
 			reg-names = "dbi", "config", "atu", "app";
 			ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>,
 				 <0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>;
@@ -1673,7 +1673,7 @@
 			reg = <0 0x4c300000 0 0x10000>,
 			      <0 0x4c360000 0 0x1000>,
 			      <0 0x4c320000 0 0x1000>,
-			      <0 0x4c340000 0 0x2000>,
+			      <0 0x4c340000 0 0x4000>,
 			      <0 0x4c370000 0 0x10000>,
 			      <0x9 0 1 0>;
 			reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
@@ -1700,7 +1700,7 @@
 			reg = <0 0x4c380000 0 0x10000>,
 			      <8 0x80100000 0 0xfe00000>,
 			      <0 0x4c3e0000 0 0x10000>,
-			      <0 0x4c3c0000 0 0x2000>;
+			      <0 0x4c3c0000 0 0x4000>;
 			reg-names = "dbi", "config", "atu", "app";
 			ranges = <0x81000000 0 0x00000000 0x8 0x8ff00000 0 0x00100000>,
 				 <0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>;
@@ -1749,7 +1749,7 @@
 			reg = <0 0x4c380000 0 0x10000>,
 			      <0 0x4c3e0000 0 0x1000>,
 			      <0 0x4c3a0000 0 0x1000>,
-			      <0 0x4c3c0000 0 0x2000>,
+			      <0 0x4c3c0000 0 0x4000>,
 			      <0 0x4c3f0000 0 0x10000>,
 			      <0xa 0 1 0>;
 			reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space";
@@ -116,11 +116,11 @@
 	};

 	intc: interrupt-controller@4ac10000 {
-		compatible = "arm,cortex-a7-gic";
+		compatible = "arm,gic-400";
 		reg = <0x4ac10000 0x0 0x1000>,
-		      <0x4ac20000 0x0 0x2000>,
-		      <0x4ac40000 0x0 0x2000>,
-		      <0x4ac60000 0x0 0x2000>;
+		      <0x4ac20000 0x0 0x20000>,
+		      <0x4ac40000 0x0 0x20000>,
+		      <0x4ac60000 0x0 0x20000>;
 		#interrupt-cells = <3>;
 		interrupt-controller;
 	};
@@ -1201,13 +1201,12 @@
 		};

 		intc: interrupt-controller@4ac10000 {
-			compatible = "arm,cortex-a7-gic";
+			compatible = "arm,gic-400";
 			reg = <0x4ac10000 0x1000>,
-			      <0x4ac20000 0x2000>,
-			      <0x4ac40000 0x2000>,
-			      <0x4ac60000 0x2000>;
+			      <0x4ac20000 0x20000>,
+			      <0x4ac40000 0x20000>,
+			      <0x4ac60000 0x20000>;
 			#interrupt-cells = <3>;
-			#address-cells = <1>;
 			interrupt-controller;
 		};
 	};
@@ -115,14 +115,13 @@
 	};

 	intc: interrupt-controller@4ac00000 {
-		compatible = "arm,cortex-a7-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
-		#address-cells = <1>;
 		interrupt-controller;
 		reg = <0x0 0x4ac10000 0x0 0x1000>,
-		      <0x0 0x4ac20000 0x0 0x2000>,
-		      <0x0 0x4ac40000 0x0 0x2000>,
-		      <0x0 0x4ac60000 0x0 0x2000>;
+		      <0x0 0x4ac20000 0x0 0x20000>,
+		      <0x0 0x4ac40000 0x0 0x20000>,
+		      <0x0 0x4ac60000 0x0 0x20000>;
 	};

 	psci {
@@ -52,7 +52,7 @@
 	mrs	x0, id_aa64mmfr1_el1
 	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x0, .Lskip_hcrx_\@
-	mov_q	x0, HCRX_HOST_FLAGS
+	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

 	/* Enable GCS if supported */
 	mrs_s	x1, SYS_ID_AA64PFR1_EL1
@@ -100,9 +100,8 @@
 			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)

-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 #define MPAMHCR_HOST_FLAGS	0

 /* TCR_EL2 Registers bits */
@@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 	return res;
 }

+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
+{
+	const struct vdso_time_data *ret = &vdso_u_time_data;
+
+	/* Work around invalid absolute relocations */
+	OPTIMIZER_HIDE_VAR(ret);
+
+	return ret;
+}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
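For context on the OPTIMIZER_HIDE_VAR() trick used above: it is an empty inline asm that takes the variable in and hands it back, making the value opaque to the optimizer so GCC cannot fold the pointer back into an absolute relocation. A paraphrased sketch of the kernel's definition (see include/linux/compiler.h):

    /* Value flows through the asm unchanged, but the compiler can no
     * longer prove what it is, so it stays in a register. */
    #define OPTIMIZER_HIDE_VAR(var) \
            __asm__ ("" : "=r" (var) : "0" (var))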
@@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC

 DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

-bool arm64_use_ng_mappings = false;
+/*
+ * arm64_use_ng_mappings must be placed in the .data section, otherwise it
+ * ends up in the .bss section where it is initialized in early_map_kernel()
+ * after the MMU (with the idmap) was enabled. create_init_idmap() - which
+ * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
+ * may end up generating incorrect idmap page table attributes.
+ */
+bool arm64_use_ng_mappings __read_mostly = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);

 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
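The ordering hazard the comment describes is generic: a zero-initialized C global normally lands in .bss, whose contents are only valid once early boot has cleared/initialized that section, while a global placed in an initialized-data section is valid as soon as the image is mapped. An illustrative sketch (simplified; in the kernel, __read_mostly expands to a section attribute like the one below):

    /* Ends up in .bss: reading it before .bss setup yields garbage. */
    static bool flag_in_bss = false;

    /* Forced into initialized data: valid immediately after load. */
    static bool flag_in_data
            __attribute__((__section__(".data..read_mostly"))) = false;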
@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
 	static const struct midr_range spectre_bhb_k132_list[] = {
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+		{},
 	};
 	static const struct midr_range spectre_bhb_k38_list[] = {
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+		{},
 	};
 	static const struct midr_range spectre_bhb_k32_list[] = {
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
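The added {} entries are sentinels: the arm64 MIDR range walkers stop at the first all-zero entry, so a list without a terminator is walked past its end. A simplified sketch of that walker shape (the real helper is is_midr_in_range_list(); the function name below is illustrative):

    static bool example_midr_in_list(u32 midr, const struct midr_range *ranges)
    {
            const struct midr_range *r;

            /* The {} sentinel gives r->model == 0, ending the loop. */
            for (r = ranges; r->model; r++)
                    if (midr_is_cpu_model_range(midr, r->model,
                                                r->rv_min, r->rv_max))
                            return true;
            return false;
    }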
@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)

 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
@@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 	 * EL1 instead of being trapped to EL2.
 	 */
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
 		write_sysreg(0, pmselr_el0);

-		hctxt = host_data_ptr(host_ctxt);
 		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
 		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
@@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 			hcrx &= ~clr;
 		}

+		ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
 		write_sysreg_s(hcrx, SYS_HCRX_EL2);
 	}
@@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)

 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

 	write_sysreg(0, hstr_el2);
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
-		hctxt = host_data_ptr(host_ctxt);
 		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
 		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
 	}

 	if (cpus_have_final_cap(ARM64_HAS_HCX))
-		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

 	__deactivate_traps_hfgxtr(vcpu);
 	__deactivate_traps_mpam();
@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
 	int ret;

-	if (!addr_is_memory(addr))
+	if (!range_is_memory(addr, addr + size))
 		return -EPERM;

 	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
@@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void)
 	/*
 	 * To check whether we have a MMIO-based (GICv2 compatible)
 	 * CPU interface, we need to disable the system register
-	 * view. To do that safely, we have to prevent any interrupt
-	 * from firing (which would be deadly).
+	 * view.
 	 *
-	 * Note that this only makes sense on VHE, as interrupts are
-	 * already masked for nVHE as part of the exception entry to
-	 * EL2.
-	 */
-	if (has_vhe())
-		flags = local_daif_save();
-
-	/*
 	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
 	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
 	 * interrupt overrides must be set. You've got to love this.
+	 *
+	 * As we always run VHE with HCR_xMO set, no extra xMO
+	 * manipulation is required in that case.
+	 *
+	 * To safely disable SRE, we have to prevent any interrupt
+	 * from firing (which would be deadly). This only makes sense
+	 * on VHE, as interrupts are already masked for nVHE as part
+	 * of the exception entry to EL2.
 	 */
-	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
-	isb();
+	if (has_vhe()) {
+		flags = local_daif_save();
+	} else {
+		sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+		isb();
+	}

 	write_gicreg(0, ICC_SRE_EL1);
 	isb();
@@ -453,11 +457,13 @@ u64 __vgic_v3_get_gic_config(void)

 	write_gicreg(sre, ICC_SRE_EL1);
 	isb();
-	sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
-	isb();

-	if (has_vhe())
+	if (has_vhe()) {
 		local_daif_restore(flags);
+	} else {
+		sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+		isb();
+	}

 	val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
 	val |= read_gicreg(ICH_VTR_EL2);
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}

+	if (!is_protected_kvm_enabled())
+		memcache = &vcpu->arch.mmu_page_cache;
+	else
+		memcache = &vcpu->arch.pkvm_memcache;
+
 	/*
 	 * Permission faults just need to update the existing leaf entry,
 	 * and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!fault_is_perm || (logging_active && write_fault)) {
 		int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);

-		if (!is_protected_kvm_enabled()) {
-			memcache = &vcpu->arch.mmu_page_cache;
+		if (!is_protected_kvm_enabled())
 			ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
-		} else {
-			memcache = &vcpu->arch.pkvm_memcache;
+		else
 			ret = topup_hyp_memcache(memcache, min_pages);
-		}

 		if (ret)
 			return ret;
 	}
@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
 		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

+	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+		return -EINVAL;
+
 	return set_id_reg(vcpu, rd, user_val);
 }
@@ -6,11 +6,10 @@
 #include <linux/linkage.h>

 extern void (*cpu_wait)(void);
-extern void r4k_wait(void);
-extern asmlinkage void __r4k_wait(void);
+extern asmlinkage void r4k_wait(void);
 extern void r4k_wait_irqoff(void);

-static inline int using_rollback_handler(void)
+static inline int using_skipover_handler(void)
 {
 	return cpu_wait == r4k_wait;
 }
@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,

 /* Query offset/name of register from its name/offset */
 extern int regs_query_register_offset(const char *name);
-#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+#define MAX_REG_OFFSET \
+	(offsetof(struct pt_regs, __last) - sizeof(unsigned long))

 /**
  * regs_get_register() - get register value from its offset
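Why the bound shrinks by one word: __last is an end-of-struct marker, so an offset equal to offsetof(struct pt_regs, __last) passed the old check yet read one unsigned long past the final register. A sketch of the bounds-checked accessor this macro protects (mirroring the kernel's regs_get_register() pattern; the function name here is illustrative):

    static inline unsigned long example_get_register(struct pt_regs *regs,
                                                     unsigned int offset)
    {
            /* With the fixed MAX_REG_OFFSET, the read below stays in bounds. */
            if (offset > MAX_REG_OFFSET)
                    return 0;
            return *(unsigned long *)((unsigned long)regs + offset);
    }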
@@ -104,48 +104,59 @@ handle_vcei:

 	__FINIT

-	.align	5	/* 32 byte rollback region */
-LEAF(__r4k_wait)
-	.set	push
-	.set	noreorder
-	/* start of rollback region */
-	LONG_L	t0, TI_FLAGS($28)
-	nop
-	andi	t0, _TIF_NEED_RESCHED
-	bnez	t0, 1f
-	 nop
-	nop
-	nop
-#ifdef CONFIG_CPU_MICROMIPS
-	nop
-	nop
-	nop
-	nop
-#endif
+	.section .cpuidle.text,"ax"
+	/* Align to 32 bytes for the maximum idle interrupt region size. */
+	.align	5
+LEAF(r4k_wait)
+	/* Keep the ISA bit clear for calculations on local labels here. */
+0:	.fill	0
+	/* Start of idle interrupt region. */
+	local_irq_enable
+	/*
+	 * If an interrupt lands here, before going idle on the next
+	 * instruction, we must *NOT* go idle since the interrupt could
+	 * have set TIF_NEED_RESCHED or caused a timer to need resched.
+	 * Fall through -- see skipover_handler below -- and have the
+	 * idle loop take care of things.
+	 */
+1:	.fill	0
+	/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
+	.if	1b - 0b > 32
+	.error	"overlong idle interrupt region"
+	.elseif	1b - 0b > 8
+	.align	4
+	.endif
+2:	.fill	0
+	.equ	r4k_wait_idle_size, 2b - 0b
+	/* End of idle interrupt region; size has to be a power of 2. */
 	.set	MIPS_ISA_ARCH_LEVEL_RAW
+r4k_wait_insn:
 	wait
-	/* end of rollback region (the region size must be power of two) */
-1:
+r4k_wait_exit:
 	.set	mips0
+	local_irq_disable
 	jr	ra
-	 nop
-	.set	pop
-	END(__r4k_wait)
+	END(r4k_wait)
+	.previous

-.macro	BUILD_ROLLBACK_PROLOGUE handler
-FEXPORT(rollback_\handler)
+.macro	BUILD_SKIPOVER_PROLOGUE handler
+FEXPORT(skipover_\handler)
 	.set	push
 	.set	noat
 	MFC0	k0, CP0_EPC
-	PTR_LA	k1, __r4k_wait
-	ori	k0, 0x1f	/* 32 byte rollback region */
-	xori	k0, 0x1f
+	/* Subtract/add 2 to let the ISA bit propagate through the mask. */
+	PTR_LA	k1, r4k_wait_insn - 2
+	ori	k0, r4k_wait_idle_size - 2
 	.set	noreorder
 	bne	k0, k1, \handler
+	 PTR_ADDIU	k0, r4k_wait_exit - r4k_wait_insn + 2
+	.set	reorder
 	MTC0	k0, CP0_EPC
 	.set	pop
 	.endm

 	.align	5
-BUILD_ROLLBACK_PROLOGUE handle_int
+BUILD_SKIPOVER_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 	.cfi_signal_frame
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
  * This prototype is copied to ebase + n*IntCtl.VS and patched
  * to invoke the handler
  */
-BUILD_ROLLBACK_PROLOGUE except_vec_vi
+BUILD_SKIPOVER_PROLOGUE except_vec_vi
 NESTED(except_vec_vi, 0, sp)
 	SAVE_SOME docfi=1
 	SAVE_AT docfi=1
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
 	write_c0_conf(cfg | R30XX_CONF_HALT);
 }

-void __cpuidle r4k_wait(void)
-{
-	raw_local_irq_enable();
-	__r4k_wait();
-	raw_local_irq_disable();
-}
-
 /*
  * This variant is preferable as it allows testing need_resched and going to
  * sleep depending on the outcome atomically. Unfortunately the "It is
@@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 	mips_cps_cluster_bootcfg = kcalloc(nclusters,
 					   sizeof(*mips_cps_cluster_bootcfg),
 					   GFP_KERNEL);
+	if (!mips_cps_cluster_bootcfg)
+		goto err_out;

 	if (nclusters > 1)
 		mips_cm_update_property();
@@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 		mips_cps_cluster_bootcfg[cl].core_power =
 			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
 				GFP_KERNEL);
+		if (!mips_cps_cluster_bootcfg[cl].core_power)
+			goto err_out;

 		/* Allocate VPE boot configuration structs */
 		for (c = 0; c < ncores; c++) {
@@ -77,7 +77,7 @@
 #include "access-helper.h"

 extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
@@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 {
 	extern const u8 except_vec_vi[];
 	extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
-	extern const u8 rollback_except_vec_vi[];
+	extern const u8 skipover_except_vec_vi[];
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
 	int srssets = current_cpu_data.srsets;
@@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 		change_c0_srsmap(0xf << n*4, 0 << n*4);
 	}

-	vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+	vec_start = using_skipover_handler() ? skipover_except_vec_vi :
 						except_vec_vi;
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
 	ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2426,8 +2426,8 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();

-	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
-					rollback_handle_int : handle_int);
+	set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+					skipover_handle_int : handle_int);
 	set_except_vector(EXCCODE_MOD, handle_tlbm);
 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
 	set_except_vector(EXCCODE_TLBS, handle_tlbs);
@@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs)

 	memcpy(regs->fr, frcopy, sizeof regs->fr);
 	if (signalcode != 0) {
-		force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
-				(void __user *) regs->iaoq[0]);
-		return -1;
+		int sig = signalcode >> 24;
+
+		if (sig == SIGFPE) {
+			/*
+			 * Clear floating point trap bit to avoid trapping
+			 * again on the first floating-point instruction in
+			 * the userspace signal handler.
+			 */
+			regs->fr[0] &= ~(1ULL << 38);
+		}
+		force_sig_fault(sig, signalcode & 0xffffff,
+				(void __user *) regs->iaoq[0]);
+		return -1;
 	}

 	return signalcode ? -1 : 0;
@@ -234,10 +234,8 @@ fi

 # suppress some warnings in recent ld versions
 nowarn="-z noexecstack"
-if ! ld_is_lld; then
-	if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
-		nowarn="$nowarn --no-warn-rwx-segments"
-	fi
+if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
+	nowarn="$nowarn --no-warn-rwx-segments"
 fi

 platformo=$object/"$platform".o
@@ -258,10 +258,6 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 			break;
 		}
 	}
-	if (i == hdr->e_shnum) {
-		pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name);
-		return -ENOEXEC;
-	}
 #endif

 	pr_debug("Looks like a total of %lu stubs, max\n", relocs);
@@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	return 0;
 }

-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
 	if (radix_enabled())
@@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)

 	return false;
 }
+#endif

 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;

+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in altmap boundary check to decide whether
+	 * we should use altmap or RAM based backing memory allocation. Also
+	 * the address needs to be aligned for set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a pmd mapping. We don't want to be too aggressive here because
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 */
+
 	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 		 * in altmap block allocation failures, in which case
 		 * we fallback to RAM for vmemmap allocation.
 		 */
-		if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+		if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 			/*
 			 * make sure we don't create altmap mappings
 			 * covering things outside the device.
@@ -17,7 +17,7 @@ config PPC_POWERNV
 	select MMU_NOTIFIER
 	select FORCE_SMP
 	select ARCH_SUPPORTS_PER_VMA_LOCK
-	select PPC_RADIX_BROADCAST_TLBIE
+	select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
 	default y

 config OPAL_PRD
@@ -23,7 +23,7 @@ config PPC_PSERIES
 	select FORCE_SMP
 	select SWIOTLB
 	select ARCH_SUPPORTS_PER_VMA_LOCK
-	select PPC_RADIX_BROADCAST_TLBIE
+	select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU
 	default y

 config PARAVIRT
@@ -275,6 +275,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
 	unsigned long pmm;
 	u8 pmlen;

+	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+		return -EINVAL;
+
 	if (is_compat_thread(ti))
 		return -EINVAL;
@@ -330,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
 	struct thread_info *ti = task_thread_info(task);
 	long ret = 0;

+	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+		return -EINVAL;
+
 	if (is_compat_thread(ti))
 		return -EINVAL;
@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
 DO_ERROR_INFO(do_trap_load_fault,
 	SIGSEGV, SEGV_ACCERR, "load access fault");

-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+enum misaligned_access_type {
+	MISALIGNED_STORE,
+	MISALIGNED_LOAD,
+};
+static const struct {
+	const char *type_str;
+	int (*handler)(struct pt_regs *regs);
+} misaligned_handler[] = {
+	[MISALIGNED_STORE] = {
+		.type_str = "Oops - store (or AMO) address misaligned",
+		.handler = handle_misaligned_store,
+	},
+	[MISALIGNED_LOAD] = {
+		.type_str = "Oops - load address misaligned",
+		.handler = handle_misaligned_load,
+	},
+};
+
+static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
 {
+	irqentry_state_t state;
+
 	if (user_mode(regs)) {
 		irqentry_enter_from_user_mode(regs);
+		local_irq_enable();
+	} else {
+		state = irqentry_nmi_enter(regs);
+	}

-		if (handle_misaligned_load(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-			      "Oops - load address misaligned");
+	if (misaligned_handler[type].handler(regs))
+		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+			      misaligned_handler[type].type_str);

+	if (user_mode(regs)) {
+		local_irq_disable();
 		irqentry_exit_to_user_mode(regs);
 	} else {
-		irqentry_state_t state = irqentry_nmi_enter(regs);
-
-		if (handle_misaligned_load(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-			      "Oops - load address misaligned");
-
 		irqentry_nmi_exit(regs, state);
 	}
 }

+asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+{
+	do_trap_misaligned(regs, MISALIGNED_LOAD);
+}
+
 asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
 {
-	if (user_mode(regs)) {
-		irqentry_enter_from_user_mode(regs);
-
-		if (handle_misaligned_store(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				"Oops - store (or AMO) address misaligned");
-
-		irqentry_exit_to_user_mode(regs);
-	} else {
-		irqentry_state_t state = irqentry_nmi_enter(regs);
-
-		if (handle_misaligned_store(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				"Oops - store (or AMO) address misaligned");
-
-		irqentry_nmi_exit(regs, state);
-	}
+	do_trap_misaligned(regs, MISALIGNED_STORE);
 }

 DO_ERROR_INFO(do_trap_store_fault,
 	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
 DO_ERROR_INFO(do_trap_ecall_s,
@@ -88,6 +88,13 @@
 #define INSN_MATCH_C_FSWSP	0xe002
 #define INSN_MASK_C_FSWSP	0xe003

+#define INSN_MATCH_C_LHU	0x8400
+#define INSN_MASK_C_LHU		0xfc43
+#define INSN_MATCH_C_LH		0x8440
+#define INSN_MASK_C_LH		0xfc43
+#define INSN_MATCH_C_SH		0x8c00
+#define INSN_MASK_C_SH		0xfc43
+
 #define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)

 #if defined(CONFIG_64BIT)
@@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
 	int __ret;							\
 									\
 	if (user_mode(regs)) {						\
-		__ret = __get_user(insn, (type __user *) insn_addr);	\
+		__ret = get_user(insn, (type __user *) insn_addr);	\
 	} else {							\
 		insn = *(type *)insn_addr;				\
 		__ret = 0;						\
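The distinction behind this one-character-looking change: get_user() performs the access_ok() range check that __get_user() skips, so an instruction address taken from trap state cannot be abused to read kernel memory. A hedged sketch of the safer pattern (the wrapper function is illustrative, not from this commit):

    #include <linux/uaccess.h>

    static long example_fetch_insn(unsigned short __user *insn_addr,
                                   unsigned short *out)
    {
            unsigned short insn;

            /* get_user() validates the pointer range and handles faults. */
            if (get_user(insn, insn_addr))
                    return -EFAULT;
            *out = insn;
            return 0;
    }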
@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
 		fp = 1;
 		len = 4;
 #endif
+	} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+		len = 2;
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
+		len = 2;
+		shift = 8 * (sizeof(ulong) - len);
+		insn = RVC_RS2S(insn) << SH_RD;
 	} else {
 		regs->epc = epc;
 		return -1;
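The `shift` set only in the signed C.LH case drives the usual sign-extension-by-shift idiom: move the 16-bit payload to the top of the register, then arithmetic-shift it back down. A minimal sketch of that idiom (illustrative helper, matching shift = 8 * (sizeof(ulong) - len) with len = 2):

    static inline long sign_extend16(unsigned long raw)
    {
            unsigned long shift = 8 * (sizeof(unsigned long) - 2);

            /* Left shift discards high bits; arithmetic right shift
             * replicates the sign bit back down. */
            return (long)(raw << shift) >> shift;
    }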
@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
 		len = 4;
 		val.data_ulong = GET_F32_RS2C(insn, regs);
 #endif
+	} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
+		len = 2;
+		val.data_ulong = GET_RS2S(insn, regs);
 	} else {
 		regs->epc = epc;
 		return -1;
@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
 	spin_unlock(&vcpu->arch.reset_cntx_lock);

+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
 	kvm_riscv_vcpu_fp_reset(vcpu);

 	kvm_riscv_vcpu_vector_reset(vcpu);
@@ -38,7 +38,6 @@ CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
@@ -92,7 +91,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BINFMT_MISC=m
 CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_SLAB_BUCKETS=y
 CONFIG_SLUB_STATS=y
@@ -395,6 +393,9 @@ CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -405,6 +406,9 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
 CONFIG_NET_ACT_GATE=m
 CONFIG_NET_TC_SKB_EXT=y
 CONFIG_DNS_RESOLVER=y
@@ -628,8 +632,16 @@ CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_MEM=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -654,7 +666,6 @@ CONFIG_NILFS2_FS=m
 CONFIG_BCACHEFS_FS=y
 CONFIG_BCACHEFS_QUOTA=y
 CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
 CONFIG_FS_VERITY=y
@@ -724,11 +735,10 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_LOCKDOWN_LSM=y
@@ -741,6 +751,8 @@ CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -756,7 +768,6 @@ CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_ARIA=m
 CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
@@ -801,7 +812,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
 CONFIG_CRYPTO_HMAC_S390=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
@@ -812,9 +822,9 @@ CONFIG_PKEY_UV=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
 CONFIG_CORDIC=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_RANDOM32_SELFTEST=y
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
@@ -36,7 +36,6 @@ CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_PROFILING=y
 CONFIG_KEXEC=y
 CONFIG_KEXEC_FILE=y
@@ -86,7 +85,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_IOSCHED_BFQ=y
 CONFIG_BINFMT_MISC=m
 CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_SLAB_BUCKETS=y
 # CONFIG_COMPAT_BRK is not set
@@ -385,6 +383,9 @@ CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
 CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
 CONFIG_NET_EMATCH=y
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -395,6 +396,9 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
 CONFIG_NET_ACT_GATE=m
 CONFIG_NET_TC_SKB_EXT=y
 CONFIG_DNS_RESOLVER=y
@@ -618,8 +622,16 @@ CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_MEM=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VDPA=m
+CONFIG_VDPA_SIM=m
+CONFIG_VDPA_SIM_NET=m
+CONFIG_VDPA_SIM_BLOCK=m
+CONFIG_VDPA_USER=m
+CONFIG_MLX5_VDPA_NET=m
+CONFIG_VP_VDPA=m
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
+CONFIG_VHOST_VDPA=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -641,7 +653,6 @@ CONFIG_NILFS2_FS=m
 CONFIG_BCACHEFS_FS=m
 CONFIG_BCACHEFS_QUOTA=y
 CONFIG_BCACHEFS_POSIX_ACL=y
-CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
 CONFIG_FS_VERITY=y
@@ -711,6 +722,7 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
@@ -742,7 +754,6 @@ CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_ARIA=m
 CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_DES=m
@@ -788,7 +799,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
 CONFIG_CRYPTO_HMAC_S390=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
@@ -799,10 +809,10 @@ CONFIG_PKEY_UV=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
 CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_CRYPTO_KRB5=m
+CONFIG_CRYPTO_KRB5_SELFTESTS=y
 CONFIG_CORDIC=m
 CONFIG_PRIME_NUMBERS=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
@@ -70,7 +70,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_FS=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set

@@ -602,7 +602,8 @@ SYM_CODE_START(stack_invalid)
	stmg %r0,%r7,__PT_R0(%r11)
	stmg %r8,%r9,__PT_PSW(%r11)
	mvc __PT_R8(64,%r11),0(%r14)
	stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	GET_LC %r2
	mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
	xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr %r2,%r11 # pass pointer to pt_regs
	jg kernel_stack_invalid

@@ -428,6 +428,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
		return;
	}
	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
	if (IS_ERR(zdev))
		return;
	list_add_tail(&zdev->entry, scan_list);
}

@@ -55,6 +55,7 @@ do { \
		goto err_label; \
	} \
	*((type *)dst) = get_unaligned((type *)(src)); \
	barrier(); \
	current->thread.segv_continue = NULL; \
} while (0)

@@ -66,6 +67,7 @@ do { \
	if (__faulted) \
		goto err_label; \
	put_unaligned(*((type *)src), (type *)(dst)); \
	barrier(); \
	current->thread.segv_continue = NULL; \
} while (0)

@@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
			panic("Failed to sync kernel TLBs: %d", err);
		goto out;
	}
	else if (current->mm == NULL) {
		if (current->pagefault_disabled) {
			if (!mc) {
				show_regs(container_of(regs, struct pt_regs, regs));
				panic("Segfault with pagefaults disabled but no mcontext");
			}
			if (!current->thread.segv_continue) {
				show_regs(container_of(regs, struct pt_regs, regs));
				panic("Segfault without recovery target");
			}
			mc_set_rip(mc, current->thread.segv_continue);
			current->thread.segv_continue = NULL;
			goto out;
	else if (current->pagefault_disabled) {
		if (!mc) {
			show_regs(container_of(regs, struct pt_regs, regs));
			panic("Segfault with pagefaults disabled but no mcontext");
		}
		if (!current->thread.segv_continue) {
			show_regs(container_of(regs, struct pt_regs, regs));
			panic("Segfault without recovery target");
		}
		mc_set_rip(mc, current->thread.segv_continue);
		current->thread.segv_continue = NULL;
		goto out;
	}
	else if (current->mm == NULL) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Segfault with no mm");
	}

@@ -2368,6 +2368,7 @@ config STRICT_SIGALTSTACK_SIZE
config CFI_AUTO_DEFAULT
	bool "Attempt to use FineIBT by default at boot time"
	depends on FINEIBT
	depends on !RUST || RUSTC_VERSION >= 108800
	default y
	help
	  Attempt to use FineIBT by default at boot time. If enabled,

@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)

void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
	static bool sevsnp;

	/* Platform-specific memory-acceptance call goes here */
	if (early_is_tdx_guest()) {
		if (!tdx_accept_memory(start, end))
			panic("TDX: Failed to accept memory\n");
	} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
		sevsnp = true;
	} else if (early_is_sevsnp_guest()) {
		snp_accept_memory(start, end);
	} else {
		error("Cannot accept memory: unknown platform\n");

@@ -645,3 +645,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)

	sev_verify_cbit(top_level_pgt);
}

bool early_is_sevsnp_guest(void)
{
	static bool sevsnp;

	if (sevsnp)
		return true;

	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
		return false;

	sevsnp = true;

	if (!snp_vmpl) {
		unsigned int eax, ebx, ecx, edx;

		/*
		 * CPUID Fn8000_001F_EAX[28] - SVSM support
		 */
		eax = 0x8000001f;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (eax & BIT(28)) {
			struct msr m;

			/* Obtain the address of the calling area to use */
			boot_rdmsr(MSR_SVSM_CAA, &m);
			boot_svsm_caa = (void *)m.q;
			boot_svsm_caa_pa = m.q;

			/*
			 * The real VMPL level cannot be discovered, but the
			 * memory acceptance routines make no use of that so
			 * any non-zero value suffices here.
			 */
			snp_vmpl = U8_MAX;
		}
	}
	return true;
}

@@ -13,12 +13,14 @@
bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
bool early_is_sevsnp_guest(void);

#else

static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 sev_get_status(void) { return 0; }
static inline bool early_is_sevsnp_guest(void) { return false; }

#endif

@@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added)
	}
}

static inline int is_x86_event(struct perf_event *event)
int is_x86_event(struct perf_event *event)
{
	int i;

@@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
	arr[pebs_enable] = (struct perf_guest_switch_msr){
		.msr = MSR_IA32_PEBS_ENABLE,
		.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
		.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
	};

	if (arr[pebs_enable].host) {

@@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
			 */
			intel_pmu_save_and_restart_reload(event, count);
		}
	} else
		intel_pmu_save_and_restart(event);
	} else {
		/*
		 * For a non-precise event, it's possible the
		 * counters-snapshotting records a positive value for the
		 * overflowed event. Then the HW auto-reload mechanism
		 * reset the counter to 0 immediately, because the
		 * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD
		 * is not set. The counter backwards may be observed in a
		 * PMI handler.
		 *
		 * Since the event value has been updated when processing the
		 * counters-snapshotting record, only needs to set the new
		 * period for the counter.
		 */
		if (is_pebs_counter_event_group(event))
			static_call(x86_pmu_set_period)(event);
		else
			intel_pmu_save_and_restart(event);
	}
}

static __always_inline void

@@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event)
	return is_metric_event(event) || is_slots_event(event);
}

int is_x86_event(struct perf_event *event);

static inline bool check_leader_group(struct perf_event *leader, int flags)
{
	return is_x86_event(leader) ? !!(leader->hw.flags & flags) : false;
}

static inline bool is_branch_counters_group(struct perf_event *event)
{
	return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
	return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS);
}

static inline bool is_pebs_counter_event_group(struct perf_event *event)
{
	return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
	return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR);
}

struct amd_nb {

@@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
static inline bool __init microcode_loader_disabled(void) { return false; }
#endif

extern unsigned long initrd_start_early;

@@ -1098,15 +1098,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz

static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

@@ -41,8 +41,8 @@

#include "internal.h"

static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = false;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
	u32 lvl, dummy, i;
	u32 *levels;

	if (x86_cpuid_vendor() != X86_VENDOR_AMD)
		return false;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	levels = final_levels;
@@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
	return false;
}

static bool __init check_loader_disabled_bsp(void)
bool __init microcode_loader_disabled(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";
	const char *cmdline = boot_command_line;
	const char *option = __dis_opt_str;

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
	if (dis_ucode_ldr)
		return true;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return true;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		dis_ucode_ldr = false;
	/*
	 * Disable when:
	 *
	 * 1) The CPU does not support CPUID.
	 *
	 * 2) Bit 31 in CPUID[1]:ECX is clear
	 *    The bit is reserved for hypervisor use. This is still not
	 *    completely accurate as XEN PV guests don't see that CPUID bit
	 *    set, but that's good enough as they don't land on the BSP
	 *    path anyway.
	 *
	 * 3) Certain AMD patch levels are not allowed to be
	 *    overwritten.
	 */
	if (!have_cpuid_p() ||
	    native_cpuid_ecx(1) & BIT(31) ||
	    amd_check_current_patch_level())
		dis_ucode_ldr = true;

	return dis_ucode_ldr;
}
@@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
	unsigned int cpuid_1_eax;
	bool intel = true;

	if (!have_cpuid_p())
	if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
		dis_ucode_ldr = true;

	if (microcode_loader_disabled())
		return;

	cpuid_1_eax = native_cpuid_eax(1);
@@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
		return;
	}

	if (check_loader_disabled_bsp())
		return;

	if (intel)
		load_ucode_intel_bsp(&early_data);
	else
@@ -159,6 +164,11 @@ void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	/*
	 * Can't use microcode_loader_disabled() here - .init section
	 * hell. It doesn't have to either - the BSP variant must've
	 * parsed cmdline already anyway.
	 */
	if (dis_ucode_ldr)
		return;

@@ -810,7 +820,7 @@ static int __init microcode_init(void)
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
	if (microcode_loader_disabled())
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)

@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
	if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
		return 0;

	if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
	if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	uci.mc = get_microcode_blob(&uci, true);

@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
	return x86_family(eax);
}

extern bool dis_ucode_ldr;
extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD

@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
	*ptr = (unsigned long)ptep + PAGE_OFFSET;

#ifdef CONFIG_MICROCODE_INITRD32
	/* Running on a hypervisor? */
	if (native_cpuid_ecx(1) & BIT(31))
		return;

	params = (struct boot_params *)__pa_nodebug(&boot_params);
	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
		return;

@@ -466,10 +466,18 @@ SECTIONS
	}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
 * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
 * let's assert that fuller build coverage is more valuable than being able to
 * run a COMPILE_TEST kernel.
 */
#ifndef CONFIG_COMPILE_TEST
/*
 * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#endif

/* needed for Clang - see arch/x86/entry/entry.S */
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);

@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
		kvm_mmu_free_obsolete_roots(vcpu);

	/*
	 * Checking root.hpa is sufficient even when KVM has mirror root.
	 * We can have either:

@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
@@ -7669,32 +7670,6 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
}

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
					struct kvm_gfn_range *range)
{
	/*
	 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
	 * can simply ignore such slots. But if userspace is making memory
	 * PRIVATE, then KVM must prevent the guest from accessing the memory
	 * as shared. And if userspace is making memory SHARED and this point
	 * is reached, then at least one page within the range was previously
	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
	 * a hugepage can be used for affected ranges.
	 */
	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
		return false;

	/* Unmap the old attribute page. */
	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
		range->attr_filter = KVM_FILTER_SHARED;
	else
		range->attr_filter = KVM_FILTER_PRIVATE;

	return kvm_unmap_gfn_range(kvm, range);
}

static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
				int level)
{
@@ -7713,6 +7688,69 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
}

bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
					struct kvm_gfn_range *range)
{
	struct kvm_memory_slot *slot = range->slot;
	int level;

	/*
	 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
	 * can simply ignore such slots. But if userspace is making memory
	 * PRIVATE, then KVM must prevent the guest from accessing the memory
	 * as shared. And if userspace is making memory SHARED and this point
	 * is reached, then at least one page within the range was previously
	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
	 * a hugepage can be used for affected ranges.
	 */
	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
		return false;

	if (WARN_ON_ONCE(range->end <= range->start))
		return false;

	/*
	 * If the head and tail pages of the range currently allow a hugepage,
	 * i.e. reside fully in the slot and don't have mixed attributes, then
	 * add each corresponding hugepage range to the ongoing invalidation,
	 * e.g. to prevent KVM from creating a hugepage in response to a fault
	 * for a gfn whose attributes aren't changing. Note, only the range
	 * of gfns whose attributes are being modified needs to be explicitly
	 * unmapped, as that will unmap any existing hugepages.
	 */
	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
		gfn_t start = gfn_round_for_level(range->start, level);
		gfn_t end = gfn_round_for_level(range->end - 1, level);
		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);

		if ((start != range->start || start + nr_pages > range->end) &&
		    start >= slot->base_gfn &&
		    start + nr_pages <= slot->base_gfn + slot->npages &&
		    !hugepage_test_mixed(slot, start, level))
			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);

		if (end == start)
			continue;

		if ((end + nr_pages) > range->end &&
		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
		    !hugepage_test_mixed(slot, end, level))
			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
	}

	/* Unmap the old attribute page. */
	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
		range->attr_filter = KVM_FILTER_SHARED;
	else
		range->attr_filter = KVM_FILTER_PRIVATE;

	return kvm_unmap_gfn_range(kvm, range);
}

static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
			       gfn_t gfn, int level, unsigned long attrs)
{

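Note on the head/tail logic in the relocated kvm_arch_pre_set_memory_attributes() above: the affected gfn range is rounded out to hugepage boundaries before being added to the invalidation window, so partially covered huge mappings at either end are reassessed as well. A minimal user-space sketch of the same rounding arithmetic (the per-level page counts here are illustrative constants, not KVM's definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: 512 base pages per 2M page (level 2), 512^2 per 1G (level 3). */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

/* Round a gfn down to the first gfn of its hugepage at @level. */
static uint64_t gfn_round_for_level(uint64_t gfn, int level)
{
	return gfn & ~(PAGES_PER_HPAGE(level) - 1);
}

int main(void)
{
	uint64_t start = 0x12345, end = 0x12400;	/* arbitrary gfn range */

	for (int level = 2; level <= 3; level++) {
		uint64_t head = gfn_round_for_level(start, level);
		uint64_t tail = gfn_round_for_level(end - 1, level);
		uint64_t nr = PAGES_PER_HPAGE(level);

		/* Head hugepage only partially covered by [start, end)? */
		if (head != start || head + nr > end)
			printf("level %d: widen head to [%#jx, %#jx)\n",
			       level, (uintmax_t)head, (uintmax_t)(head + nr));

		/* Distinct tail hugepage that extends past the range? */
		if (tail != head && tail + nr > end)
			printf("level %d: widen tail to [%#jx, %#jx)\n",
			       level, (uintmax_t)tail, (uintmax_t)(tail + nr));
	}
	return 0;
}
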
@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)

	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_smm_changed);

void process_smi(struct kvm_vcpu *vcpu)
{

@@ -3173,9 +3173,14 @@ skip_vmsa_free:
	kvfree(svm->sev_es.ghcb_sa);
}

static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}

static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->sev_es.ghcb;
	struct vmcb_control_area *control = &svm->vmcb->control;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
	nbits = sizeof(svm->sev_es.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	/*
	 * Print KVM's snapshot of the GHCB values that were (unsuccessfully)
	 * used to handle the exit. If the guest has since modified the GHCB
	 * itself, dumping the raw GHCB won't help debug why KVM was unable to
	 * handle the VMGEXIT that KVM observed.
	 */
	pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	       kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	       control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	       control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
	       svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}

static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
	kvm_cpu_svm_disable();

	amd_pmu_disable_virt();

	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}

static int svm_enable_virtualization_cpu(void)
@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
		rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
	}

	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);

	return 0;
}

@@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}

#ifdef CONFIG_CPU_MITIGATIONS
static DEFINE_SPINLOCK(srso_lock);
static atomic_t srso_nr_vms;

static void svm_srso_clear_bp_spec_reduce(void *ign)
{
	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);

	if (!sd->bp_spec_reduce_set)
		return;

	msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
	sd->bp_spec_reduce_set = false;
}

static void svm_srso_vm_destroy(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		return;

	if (atomic_dec_return(&srso_nr_vms))
		return;

	guard(spinlock)(&srso_lock);

	/*
	 * Verify a new VM didn't come along, acquire the lock, and increment
	 * the count before this task acquired the lock.
	 */
	if (atomic_read(&srso_nr_vms))
		return;

	on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
}

static void svm_srso_vm_init(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
		return;

	/*
	 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
	 * transition, i.e. destroying the last VM, is fully complete, e.g. so
	 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
	 */
	if (atomic_inc_not_zero(&srso_nr_vms))
		return;

	guard(spinlock)(&srso_lock);

	atomic_inc(&srso_nr_vms);
}
#else
static void svm_srso_vm_init(void) { }
static void svm_srso_vm_destroy(void) { }
#endif

static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
	    !sd->bp_spec_reduce_set) {
		sd->bp_spec_reduce_set = true;
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
	}
	svm->guest_state_loaded = true;
}

@@ -2231,6 +2287,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
	 */
	if (!sev_es_guest(vcpu->kvm)) {
		clear_page(svm->vmcb);
#ifdef CONFIG_KVM_SMM
		if (is_smm(vcpu))
			kvm_smm_changed(vcpu, false);
#endif
		kvm_vcpu_reset(vcpu, true);
	}

@@ -5036,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);

	svm_srso_vm_destroy();
}

static int svm_vm_init(struct kvm *kvm)
@@ -5061,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm)
		return ret;
	}

	svm_srso_vm_init();
	return 0;
}

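The svm_srso_vm_init()/svm_srso_vm_destroy() pair above keeps a per-VM count and serializes only the 0 <-> 1 transitions with a lock, so the common init/teardown paths stay lock-free. A self-contained sketch of that pattern (a pthread mutex and a printf stub stand in for the kernel spinlock and the MSR write; all names here are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_vms;

static void set_mitigation(int on)	/* stands in for the MSR write */
{
	printf("mitigation %s\n", on ? "on" : "off");
}

static void vm_init(void)
{
	/* Fast path: some VM already pinned the mitigation; just count up. */
	int cur = atomic_load(&nr_vms);
	while (cur > 0)
		if (atomic_compare_exchange_weak(&nr_vms, &cur, cur + 1))
			return;

	/* Slow path: serialize the 0 -> 1 transition against teardown. */
	pthread_mutex_lock(&srso_lock);
	if (atomic_fetch_add(&nr_vms, 1) == 0)
		set_mitigation(1);
	pthread_mutex_unlock(&srso_lock);
}

static void vm_destroy(void)
{
	if (atomic_fetch_sub(&nr_vms, 1) != 1)
		return;			/* other VMs still running */

	pthread_mutex_lock(&srso_lock);
	/* A new VM may have raced in after the decrement; re-check. */
	if (atomic_load(&nr_vms) == 0)
		set_mitigation(0);
	pthread_mutex_unlock(&srso_lock);
}

int main(void)
{
	vm_init();
	vm_init();
	vm_destroy();
	vm_destroy();
	return 0;
}
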
@@ -335,6 +335,8 @@ struct svm_cpu_data {
	u32 next_asid;
	u32 min_asid;

	bool bp_spec_reduce_set;

	struct vmcb *save_area;
	unsigned long save_area_pa;

@@ -4597,7 +4597,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}

static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
{
	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
}
@@ -11493,7 +11493,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_queued_exception *ex = &vcpu->arch.exception;
	struct kvm_run *kvm_run = vcpu->run;
	u32 sync_valid_fields;
	u64 sync_valid_fields;
	int r;

	r = kvm_mmu_post_init_vm(vcpu->kvm);

@@ -899,8 +899,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
		cond_mitigation(tsk);

		/*
		 * Let nmi_uaccess_okay() and finish_asid_transition()
		 * know that CR3 is changing.
		 * Indicate that CR3 is about to change. nmi_uaccess_okay()
		 * and others are sensitive to the window where mm_cpumask(),
		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
		 */
		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
		barrier();
@@ -1204,8 +1205,16 @@ done:

static bool should_flush_tlb(int cpu, void *data)
{
	struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
	struct flush_tlb_info *info = data;

	/*
	 * Order the 'loaded_mm' and 'is_lazy' against their
	 * write ordering in switch_mm_irqs_off(). Ensure
	 * 'is_lazy' is at least as new as 'loaded_mm'.
	 */
	smp_rmb();

	/* Lazy TLB will get flushed at the next context switch. */
	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
		return false;
@@ -1214,8 +1223,15 @@ static bool should_flush_tlb(int cpu, void *data)
	if (!info->mm)
		return true;

	/*
	 * While switching, the remote CPU could have state from
	 * either the prev or next mm. Assume the worst and flush.
	 */
	if (loaded_mm == LOADED_MM_SWITCHING)
		return true;

	/* The target mm is loaded, and the CPU is not lazy. */
	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
	if (loaded_mm == info->mm)
		return true;

	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */

@@ -31,8 +31,8 @@ struct faultinfo {

#define ___backtrack_faulted(_faulted) \
	asm volatile ( \
		"mov $0, %0\n" \
		"movl $__get_kernel_nofault_faulted_%=,%1\n" \
		"mov $0, %0\n" \
		"jmp _end_%=\n" \
		"__get_kernel_nofault_faulted_%=:\n" \
		"mov $1, %0;" \

@@ -31,8 +31,8 @@ struct faultinfo {

#define ___backtrack_faulted(_faulted) \
	asm volatile ( \
		"mov $0, %0\n" \
		"movq $__get_kernel_nofault_faulted_%=,%1\n" \
		"mov $0, %0\n" \
		"jmp _end_%=\n" \
		"__get_kernel_nofault_faulted_%=:\n" \
		"mov $1, %0;" \

@@ -480,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
	if (req_op(rq) == REQ_OP_ZONE_APPEND ||
	    bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);

@@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio)
		 */
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
			return -EPERM;
		fallthrough;
		/* rt has prio field too */
	case IOPRIO_CLASS_BE:
		if (level >= IOPRIO_NR_LEVELS)
			return -EINVAL;
		break;
	case IOPRIO_CLASS_BE:
	case IOPRIO_CLASS_IDLE:
		break;
	case IOPRIO_CLASS_NONE:

@@ -163,11 +163,10 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
		if (ret)
			goto unlock;
	}
	if (!scomp_scratch_users) {
	if (!scomp_scratch_users++) {
		ret = crypto_scomp_alloc_scratches();
		if (ret)
			goto unlock;
		scomp_scratch_users++;
			scomp_scratch_users--;
	}
unlock:
	mutex_unlock(&scomp_lock);

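The crypto/scomp.c fix above bumps scomp_scratch_users before attempting the allocation and rolls the count back on failure, so the counter can never disagree with whether the scratch buffers actually exist. A stripped-down sketch of this lazy-init refcount pattern under a mutex (names and the malloc stand-in are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
static int scratch_users;
static void *scratch;

static int scratch_get(void)
{
	int ret = 0;

	pthread_mutex_lock(&scratch_lock);
	if (!scratch_users++) {		/* first user allocates */
		scratch = malloc(4096);
		if (!scratch) {
			scratch_users--;	/* roll back on failure */
			ret = -1;
		}
	}
	pthread_mutex_unlock(&scratch_lock);
	return ret;
}

static void scratch_put(void)
{
	pthread_mutex_lock(&scratch_lock);
	if (!--scratch_users) {		/* last user frees */
		free(scratch);
		scratch = NULL;
	}
	pthread_mutex_unlock(&scratch_lock);
}

int main(void)
{
	if (scratch_get() == 0)
		scratch_put();
	return 0;
}
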
@@ -544,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
		 boot_params->d0i3_entry_vpu_ts);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n",
		 boot_params->system_time_us);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n",
	ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
		 boot_params->power_profile);
}

@@ -646,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
	boot_params->d0i3_residency_time_us = 0;
	boot_params->d0i3_entry_vpu_ts = 0;
	if (IVPU_WA(disable_d0i2))
		boot_params->power_profile = 1;
		boot_params->power_profile |= BIT(1);

	boot_params->system_time_us = ktime_to_us(ktime_get_real());
	wmb(); /* Flush WC buffers after writing bootparams */

@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
		else
			vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 5;
		vdev->timeout.state_dump_msg = 10;
		vdev->timeout.state_dump_msg = 100;
	}
}

@@ -14,7 +14,7 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000

#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u

int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);

@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
err_erase_xa:
	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock:
	mutex_unlock(&vdev->submitted_jobs_lock);
	mutex_unlock(&file_priv->lock);
	mutex_unlock(&vdev->submitted_jobs_lock);
	ivpu_rpm_put(vdev);
	return ret;
}
@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_cmdq_create *args = data;
	struct ivpu_cmdq *cmdq;
	int ret;

	if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
		return -ENODEV;

	if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *

	mutex_unlock(&file_priv->lock);

	ivpu_rpm_put(vdev);

	return cmdq ? 0 : -ENOMEM;
}

@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_cmdq_destroy *args = data;
	struct ivpu_cmdq *cmdq;
	u32 cmdq_id;
	u32 cmdq_id = 0;
	int ret;

	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
		return -ENODEV;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
	if (!cmdq || cmdq->is_legacy) {
		ret = -ENOENT;
		goto err_unlock;
	} else {
		cmdq_id = cmdq->id;
		ivpu_cmdq_destroy(file_priv, cmdq);
		ret = 0;
	}

	cmdq_id = cmdq->id;
	ivpu_cmdq_destroy(file_priv, cmdq);
	mutex_unlock(&file_priv->lock);
	ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
	return 0;

err_unlock:
	mutex_unlock(&file_priv->lock);
	/* Abort any pending jobs only if cmdq was destroyed */
	if (!ret)
		ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);

	ivpu_rpm_put(vdev);

	return ret;
}

@@ -428,16 +428,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
	active_us = (DCT_PERIOD_US * active_percent) / 100;
	inactive_us = DCT_PERIOD_US - active_us;

	vdev->pm->dct_active_percent = active_percent;

	ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
		 active_percent, active_us, inactive_us);

	ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
		return ret;
	}

	vdev->pm->dct_active_percent = active_percent;

	ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
		 active_percent, active_us, inactive_us);
	return 0;
}

@@ -445,15 +446,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
	int ret;

	vdev->pm->dct_active_percent = 0;

	ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");

	ret = ivpu_jsm_dct_disable(vdev);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
		return ret;
	}

	vdev->pm->dct_active_percent = 0;

	ivpu_dbg(vdev, PM, "DCT disabled\n");
	return 0;
}

@@ -466,7 +468,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
	if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
		return;

	if (vdev->pm->dct_active_percent)
	if (enable)
		ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
	else
		ret = ivpu_pm_dct_disable(vdev);

@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
	if (mod)
		mk = &mod->mkobj;
	else if (drv->mod_name) {
		struct kobject *mkobj;

		/* Lookup built-in module entry in /sys/modules */
		mkobj = kset_find_obj(module_kset, drv->mod_name);
		if (mkobj) {
			mk = container_of(mkobj, struct module_kobject, kobj);
		/* Lookup or create built-in module entry in /sys/modules */
		mk = lookup_or_create_module_kobject(drv->mod_name);
		if (mk) {
			/* remember our module structure */
			drv->p->mkobj = mk;
			/* kset_find_obj took a reference */
			kobject_put(mkobj);
			/* lookup_or_create_module_kobject took a reference */
			kobject_put(&mk->kobj);
		}
	}

@@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev)

static int platform_dma_configure(struct device *dev)
{
	struct platform_driver *drv = to_platform_driver(dev->driver);
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	enum dev_dma_attr attr;
	int ret = 0;

@@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev)
		attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
		ret = acpi_dma_configure(dev, attr);
	}
	/* @drv may not be valid when we're called from the IOMMU layer */
	if (ret || !dev->driver || drv->driver_managed_dma)
	/* @dev->driver may not be valid when we're called from the IOMMU layer */
	if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
		return ret;

	ret = iommu_device_use_default_domain(dev);

@@ -505,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
	lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}

static int loop_check_backing_file(struct file *file)
{
	if (!file->f_op->read_iter)
		return -EINVAL;

	if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
		return -EINVAL;

	return 0;
}

/*
 * loop_change_fd switched the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
@@ -526,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
	if (!file)
		return -EBADF;

	error = loop_check_backing_file(file);
	if (error)
		return error;

	/* suppress uevents while reconfiguring the device */
	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);

@@ -963,6 +978,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,

	if (!file)
		return -EBADF;

	if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
		return -EINVAL;

	error = loop_check_backing_file(file);
	if (error)
		return error;

	is_loop = is_loop_device(file);

	/* This is safe, since we have a reference from open(). */

@@ -201,15 +201,10 @@ struct ublk_params_header {
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
		struct ublk_queue *ubq, int tag, size_t offset);
		const struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
						   int tag);
static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
{
	return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}

static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
{
	return ub->dev_info.flags & UBLK_F_ZONED;
@@ -609,14 +604,19 @@ static void ublk_apply_params(struct ublk_device *ub)
		ublk_dev_param_zoned_apply(ub);
}

static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
{
	return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}

static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
	return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
	return ubq->flags & UBLK_F_USER_COPY;
}

static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
	return !ublk_support_user_copy(ubq);
	return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq);
}

static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
@@ -624,8 +624,11 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
	/*
	 * read()/write() is involved in user copy, so request reference
	 * has to be grabbed
	 *
	 * for zero copy, request buffer need to be registered to io_uring
	 * buffer table, so reference is needed
	 */
	return ublk_support_user_copy(ubq);
	return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq);
}

static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
@@ -1946,13 +1949,20 @@ static void ublk_io_release(void *priv)
}

static int ublk_register_io_buf(struct io_uring_cmd *cmd,
				struct ublk_queue *ubq, unsigned int tag,
				const struct ublk_queue *ubq, unsigned int tag,
				unsigned int index, unsigned int issue_flags)
{
	struct ublk_device *ub = cmd->file->private_data;
	const struct ublk_io *io = &ubq->ios[tag];
	struct request *req;
	int ret;

	if (!ublk_support_zero_copy(ubq))
		return -EINVAL;

	if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
		return -EINVAL;

	req = __ublk_check_and_get_req(ub, ubq, tag, 0);
	if (!req)
		return -EINVAL;
@@ -1968,8 +1978,17 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
}

static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
				  const struct ublk_queue *ubq, unsigned int tag,
				  unsigned int index, unsigned int issue_flags)
{
	const struct ublk_io *io = &ubq->ios[tag];

	if (!ublk_support_zero_copy(ubq))
		return -EINVAL;

	if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
		return -EINVAL;

	return io_buffer_unregister_bvec(cmd, index, issue_flags);
}

@@ -2073,7 +2092,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
	case UBLK_IO_REGISTER_IO_BUF:
		return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
	case UBLK_IO_UNREGISTER_IO_BUF:
		return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
		return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
	case UBLK_IO_FETCH_REQ:
		ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
		if (ret)
@@ -2125,13 +2144,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}

static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
		struct ublk_queue *ubq, int tag, size_t offset)
		const struct ublk_queue *ubq, int tag, size_t offset)
{
	struct request *req;

	if (!ublk_need_req_ref(ubq))
		return NULL;

	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
	if (!req)
		return NULL;
@@ -2245,6 +2261,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
	if (!ubq)
		return ERR_PTR(-EINVAL);

	if (!ublk_support_user_copy(ubq))
		return ERR_PTR(-EACCES);

	if (tag >= ubq->q_depth)
		return ERR_PTR(-EINVAL);

@@ -2783,13 +2802,18 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
		UBLK_F_URING_CMD_COMP_IN_TASK;

	/* GET_DATA isn't needed any more with USER_COPY */
	if (ublk_dev_is_user_copy(ub))
	/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
	if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY))
		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;

	/* Zoned storage support requires user copy feature */
	/*
	 * Zoned storage support requires reuse `ublksrv_io_cmd->addr` for
	 * returning write_append_lba, which is only allowed in case of
	 * user copy or zero copy
	 */
	if (ublk_dev_is_zoned(ub) &&
	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags &
	     (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) {
		ret = -EINVAL;
		goto out_free_dev_number;
	}

@@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
		/* This is a debug event that comes from IML and OP image when it
		 * starts execution. There is no need pass this event to stack.
		 */
		if (skb->data[2] == 0x97)
		if (skb->data[2] == 0x97) {
			hci_recv_diag(hdev, skb);
			return 0;
		}
	}

	return hci_recv_frame(hdev, skb);
@@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
	u8 pkt_type;
	u16 plen;
	u32 pcie_pkt_type;
	struct sk_buff *new_skb;
	void *pdata;
	struct hci_dev *hdev = data->hdev;

@@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,

	bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);

	new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
	if (!new_skb) {
		bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
			   skb->len);
		ret = -ENOMEM;
		goto exit_error;
	}

	hci_skb_pkt_type(new_skb) = pkt_type;
	skb_put_data(new_skb, skb->data, plen);
	hci_skb_pkt_type(skb) = pkt_type;
	hdev->stat.byte_rx += plen;
	skb_trim(skb, plen);

	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
		ret = btintel_pcie_recv_event(hdev, new_skb);
		ret = btintel_pcie_recv_event(hdev, skb);
	else
		ret = hci_recv_frame(hdev, new_skb);
		ret = hci_recv_frame(hdev, skb);
	skb = NULL; /* skb is freed in the callee */

exit_error:
	if (skb)
		kfree_skb(skb);

	if (ret)
		hdev->stat.err_rx++;

@@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
	struct btintel_pcie_data *data = container_of(work,
					struct btintel_pcie_data, rx_work);
	struct sk_buff *skb;
	int err;
	struct hci_dev *hdev = data->hdev;

	if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
		/* Unlike usb products, controller will not send hardware
@@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)

	/* Process the sk_buf in queue and send to the HCI layer */
	while ((skb = skb_dequeue(&data->rx_skb_q))) {
		err = btintel_pcie_recv_frame(data, skb);
		if (err)
			bt_dev_err(hdev, "Failed to send received frame: %d",
				   err);
		kfree_skb(skb);
		btintel_pcie_recv_frame(data, skb);
	}
}

@@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
	bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);

	/* Check CR_TIA and CR_HIA for change */
	if (cr_tia == cr_hia) {
		bt_dev_warn(hdev, "RXQ: no new CD found");
	if (cr_tia == cr_hia)
		return;
	}

	rxq = &data->rxq;

@@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
	return IRQ_WAKE_THREAD;
}

static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
{
	return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
}

static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
{
	return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
}

static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;

@@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
		btintel_pcie_msix_gp0_handler(data);

	/* For TX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
		btintel_pcie_msix_tx_handle(data);
		if (!btintel_pcie_is_rxq_empty(data))
			btintel_pcie_msix_rx_handle(data);
	}

	/* For RX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
		btintel_pcie_msix_rx_handle(data);
		if (!btintel_pcie_is_txackq_empty(data))
			btintel_pcie_msix_tx_handle(data);
	}

	/*
	 * Before sending the interrupt the HW disables it to prevent a nested

@@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	/* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
		return 0;

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
@@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
	if (!bdev)
		return;

	hdev = bdev->hdev;

	/* Make sure to call btmtksdio_close before removing sdio card */
	if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
		btmtksdio_close(hdev);

	/* Be consistent the state in btmtksdio_probe */
	pm_runtime_get_noresume(bdev->dev);

	hdev = bdev->hdev;

	sdio_set_drvdata(func, NULL);
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);

@ -3010,22 +3010,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
|
|||
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
|
||||
}
|
||||
|
||||
/*
|
||||
* ==0: not a dump pkt.
|
||||
* < 0: fails to handle a dump pkt
|
||||
* > 0: otherwise.
|
||||
*/
|
||||
/* Return: 0 on success, negative errno on failure. */
|
||||
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
int ret = 1;
|
||||
int ret = 0;
|
||||
u8 pkt_type;
|
||||
u8 *sk_ptr;
|
||||
unsigned int sk_len;
|
||||
u16 seqno;
|
||||
u32 dump_size;
|
||||
|
||||
struct hci_event_hdr *event_hdr;
|
||||
struct hci_acl_hdr *acl_hdr;
|
||||
struct qca_dump_hdr *dump_hdr;
|
||||
struct btusb_data *btdata = hci_get_drvdata(hdev);
|
||||
struct usb_device *udev = btdata->udev;
|
||||
|
@ -3035,30 +3029,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
sk_len = skb->len;
|
||||
|
||||
if (pkt_type == HCI_ACLDATA_PKT) {
|
||||
acl_hdr = hci_acl_hdr(skb);
|
||||
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
|
||||
return 0;
|
||||
sk_ptr += HCI_ACL_HDR_SIZE;
|
||||
sk_len -= HCI_ACL_HDR_SIZE;
|
||||
event_hdr = (struct hci_event_hdr *)sk_ptr;
|
||||
} else {
|
||||
event_hdr = hci_event_hdr(skb);
|
||||
}
|
||||
|
||||
if ((event_hdr->evt != HCI_VENDOR_PKT)
|
||||
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
|
||||
return 0;
|
||||
|
||||
sk_ptr += HCI_EVENT_HDR_SIZE;
|
||||
sk_len -= HCI_EVENT_HDR_SIZE;
|
||||
|
||||
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
|
||||
if ((sk_len < offsetof(struct qca_dump_hdr, data))
|
||||
|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
|
||||
|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
|
||||
return 0;
|
||||
|
||||
/*it is dump pkt now*/
|
||||
seqno = le16_to_cpu(dump_hdr->seqno);
|
||||
if (seqno == 0) {
|
||||
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
|
||||
|
@@ -3132,17 +3110,84 @@ out:
	return ret;
}

/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 *sk_ptr;
	unsigned int sk_len;

	struct hci_event_hdr *event_hdr;
	struct hci_acl_hdr *acl_hdr;
	struct qca_dump_hdr *dump_hdr;

	sk_ptr = skb->data;
	sk_len = skb->len;

	acl_hdr = hci_acl_hdr(skb);
	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
		return false;

	sk_ptr += HCI_ACL_HDR_SIZE;
	sk_len -= HCI_ACL_HDR_SIZE;
	event_hdr = (struct hci_event_hdr *)sk_ptr;

	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
		return false;

	sk_ptr += HCI_EVENT_HDR_SIZE;
	sk_len -= HCI_EVENT_HDR_SIZE;

	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
		return false;

	return true;
}

/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 *sk_ptr;
	unsigned int sk_len;

	struct hci_event_hdr *event_hdr;
	struct qca_dump_hdr *dump_hdr;

	sk_ptr = skb->data;
	sk_len = skb->len;

	event_hdr = hci_event_hdr(skb);

	if ((event_hdr->evt != HCI_VENDOR_PKT)
	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
		return false;

	sk_ptr += HCI_EVENT_HDR_SIZE;
	sk_len -= HCI_EVENT_HDR_SIZE;

	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
		return false;

	return true;
}

static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (handle_dump_pkt_qca(hdev, skb))
		return 0;
	if (acl_pkt_is_dump_qca(hdev, skb))
		return handle_dump_pkt_qca(hdev, skb);
	return hci_recv_frame(hdev, skb);
}

static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (handle_dump_pkt_qca(hdev, skb))
		return 0;
	if (evt_pkt_is_dump_qca(hdev, skb))
		return handle_dump_pkt_qca(hdev, skb);
	return hci_recv_frame(hdev, skb);
}
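Read together, the two receive hooks above share one contract. Here is a hedged sketch of that common shape (the function-pointer wrapper is illustrative, not the driver's code): classify the packet first, let handle_dump_pkt_qca() consume dump packets, and pass everything else to the normal HCI input path.

/* Illustrative only: the common shape of btusb_recv_{acl,evt}_qca(). */
static int recv_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb,
			bool (*pkt_is_dump)(struct hci_dev *, struct sk_buff *))
{
	if (pkt_is_dump(hdev, skb))
		return handle_dump_pkt_qca(hdev, skb);	/* 0 or -errno */

	return hci_recv_frame(hdev, skb);	/* normal delivery */
}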
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
	raw_spin_lock(&i8253_lock);
	guard(raw_spinlock_irqsave)(&i8253_lock);

	/*
	 * Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
	outb_p(0, PIT_CH0);

	outb_p(0x30, PIT_MODE);

	raw_spin_unlock(&i8253_lock);
}

static int pit_shutdown(struct clock_event_device *evt)
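The i8253 change above swaps a manual lock/unlock pair for a scope-based guard, so every return path drops the lock and restores the IRQ state automatically. A minimal self-contained sketch of the same pattern (demo_lock and demo_disable are hypothetical):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_disable(void)
{
	guard(raw_spinlock_irqsave)(&demo_lock);

	/* Critical section; no explicit raw_spin_unlock_irqrestore() is
	 * needed -- the guard releases the lock when it goes out of scope. */
}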
@@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	if (acpi_cpufreq_driver.set_boost)
		policy->boost_supported = true;
	if (acpi_cpufreq_driver.set_boost) {
		if (policy->boost_supported) {
			/*
			 * The firmware may have altered boost state while the
			 * CPU was offline (for example during a suspend-resume
			 * cycle).
			 */
			if (policy->boost_enabled != boost_state(cpu))
				set_boost(policy, policy->boost_enabled);
		} else {
			policy->boost_supported = true;
		}
	}

	return result;
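A condensed standalone model of the re-initialization decision added above, under the assumption stated in the hunk's comment that firmware can flip boost while a CPU is offline (the function below is illustrative, not driver code; boost_state()/set_boost() are the driver's own helpers):

/* Standalone model of the boost re-init decision:
 * first init  -> mark boost supported, nothing to restore yet;
 * later inits -> restore the cached policy state if firmware drifted. */
static void boost_init_model(bool *supported, bool cached_enable,
			     bool hw_enable, void (*set)(bool))
{
	if (!*supported) {
		*supported = true;	/* first initialization of this policy */
		return;
	}
	if (cached_enable != hw_enable)
		set(cached_enable);	/* firmware changed it while offline */
}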
@@ -536,14 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

static unsigned int __resolve_freq(struct cpufreq_policy *policy,
				   unsigned int target_freq, unsigned int relation)
				   unsigned int target_freq,
				   unsigned int min, unsigned int max,
				   unsigned int relation)
{
	unsigned int idx;

	target_freq = clamp_val(target_freq, min, max);

	if (!policy->freq_table)
		return target_freq;

	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
@@ -577,8 +581,7 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
	if (unlikely(min > max))
		min = max;

	return __resolve_freq(policy, clamp_val(target_freq, min, max),
			      CPUFREQ_RELATION_LE);
	return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2397,8 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	target_freq = __resolve_freq(policy, target_freq, relation);
	target_freq = __resolve_freq(policy, target_freq, policy->min,
				     policy->max, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);
@@ -2727,8 +2730,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
	 * compiler optimizations around them because they may be accessed
	 * concurrently by cpufreq_driver_resolve_freq() during the update.
	 */
	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
	new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
					       new_data.min, new_data.max,
					       CPUFREQ_RELATION_H));
	new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
				      new_data.max, CPUFREQ_RELATION_L);
	WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);

	trace_cpu_frequency_limits(policy);
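Taken together, the cpufreq hunks move the min/max clamp into __resolve_freq() and make the table walk filter on caller-supplied bounds instead of policy->min/policy->max, which can change concurrently. A standalone model of the resulting lookup (plain C, not kernel code; CPUFREQ_RELATION_LE-style "highest frequency not above target"):

#include <stddef.h>

static unsigned int resolve_freq_model(const unsigned int *table, size_t n,
				       unsigned int target,
				       unsigned int min, unsigned int max)
{
	unsigned int best = 0;
	size_t i;

	/* Clamp first, as the new __resolve_freq() does. */
	if (target < min)
		target = min;
	if (target > max)
		target = max;

	for (i = 0; i < n; i++) {
		unsigned int freq = table[i];

		if (freq < min || freq > max)
			continue;	/* same filter as the freq_table hunk */
		if (freq <= target && freq > best)
			best = freq;
	}
	return best ? best : min;	/* no in-range entry: fall back to min */
}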
@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
					       policy->max, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;
@@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
				 unsigned int target_freq, unsigned int min,
				 unsigned int max, unsigned int relation)
{
	struct cpufreq_frequency_table optimal = {
		.driver_data = ~0,
@@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
	cpufreq_for_each_valid_entry_idx(pos, table, i) {
		freq = pos->frequency;

		if ((freq < policy->min) || (freq > policy->max))
		if (freq < min || freq > max)
			continue;
		if (freq == target_freq) {
			optimal.driver_data = i;
@@ -598,6 +598,9 @@ static bool turbo_is_disabled(void)
{
	u64 misc_en;

	if (!cpu_feature_enabled(X86_FEATURE_IDA))
		return true;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);

	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
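A hedged restatement of the logic after this hunk, as a standalone helper (illustrative only; the real code uses cpu_feature_enabled(X86_FEATURE_IDA) and rdmsrl(MSR_IA32_MISC_ENABLE, ...)): a CPU without IDA has no opportunistic turbo at all, so it is reported as disabled without ever reading the MSR.

/* Illustrative model: has_ida mirrors the feature check, misc_en and
 * turbo_disable_bit mirror the MSR read and its disable bit. */
static bool turbo_is_disabled_model(bool has_ida, unsigned long long misc_en,
				    unsigned long long turbo_disable_bit)
{
	if (!has_ida)
		return true;	/* no IDA/turbo: treat as permanently disabled */

	return !!(misc_en & turbo_disable_bit);
}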
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
	if (status & priv->ecc_stat_ce_mask) {
		regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
			    &err_addr);
		if (priv->ecc_uecnt_offset)
		if (priv->ecc_cecnt_offset)
			regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
				    &err_count);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
		}
	}

	/* Interrupt mode set to every SBERR */
	regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
		     ALTR_A10_ECC_INTMODE);
	/* Enable ECC */
	ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
					ALTR_A10_ECC_CTRL_OFST));
@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
		return PTR_ERR(edac->ecc_mgr_map);
	}

	/* Set irq mask for DDR SBE to avoid any pending irq before registration */
	regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
		     (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));

	edac->irq_chip.name = pdev->dev.of_node->name;
	edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
	edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)
#define A10_SYSMGR_ECC_INTMASK_SDMMCB     BIT(16)
#define A10_SYSMGR_ECC_INTMASK_DDR0       BIT(17)

#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST  0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
@@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
		import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
	}

	ffa_rx_release();
	if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
		ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);
@@ -255,6 +255,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
	if (!dev)
		return NULL;

	/* Drop the refcnt bumped implicitly by device_find_child */
	put_device(dev);

	return to_scmi_dev(dev);
}
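The SCMI hunk applies the standard device_find_child() reference rule. A minimal sketch of that rule in isolation (generic driver-core usage; demo_find_child and match_by_name are hypothetical):

/* device_find_child() returns its match with a reference held; a caller
 * that only borrows the pointer must drop that reference itself. */
static struct device *demo_find_child(struct device *parent, void *name)
{
	struct device *dev = device_find_child(parent, name, match_by_name);

	if (!dev)
		return NULL;

	/* The pointer stays usable while the parent keeps the child
	 * registered; drop the reference device_find_child() took. */
	put_device(dev);
	return dev;
}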
Some files were not shown because too many files have changed in this diff.