Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2025-09-18 22:14:16 +00:00

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.15-rc7).

Conflicts:

tools/testing/selftests/drivers/net/hw/ncdevmem.c
  97c4e094a4 ("tests/ncdevmem: Fix double-free of queue array")
  2f1a805f32 ("selftests: ncdevmem: Implement devmem TCP TX")
https://lore.kernel.org/20250514122900.1e77d62d@canb.auug.org.au

Adjacent changes:

net/core/devmem.c
net/core/devmem.h
  0afc44d8cd ("net: devmem: fix kernel panic when netlink socket close after module unload")
  bd61848900 ("net: devmem: Implement TX path")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

This commit is contained in commit bebd7b2626.
387 changed files with 4771 additions and 1578 deletions.
@@ -7,5 +7,5 @@ check-private-items = true

 disallowed-macros = [
     # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate
     # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303.
-    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" },
+    { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true },
 ]
.mailmap — 5 changed lines
@@ -102,6 +102,7 @@ Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Arun Kumar Neelakantam <quic_aneela@quicinc.com> <aneela@codeaurora.org>
+Asahi Lina <lina+kernel@asahilina.net> <lina@asahilina.net>
 Ashok Raj Nagarajan <quic_arnagara@quicinc.com> <arnagara@codeaurora.org>
 Ashwin Chaugule <quic_ashwinc@quicinc.com> <ashwinc@codeaurora.org>
 Asutosh Das <quic_asutoshd@quicinc.com> <asutoshd@codeaurora.org>
@@ -447,6 +448,8 @@ Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org>
+Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com>
+Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
 Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org>
@@ -483,6 +486,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
 Matthieu CASTET <castet.matthieu@free.fr>
 Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
+Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com>
 Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com>
 Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting>
 Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com>
@@ -749,6 +753,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com>
 Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
 Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
@@ -511,6 +511,7 @@ Description: information about CPUs heterogeneity.

 What:  /sys/devices/system/cpu/vulnerabilities
        /sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+       /sys/devices/system/cpu/vulnerabilities/indirect_target_selection
        /sys/devices/system/cpu/vulnerabilities/itlb_multihit
        /sys/devices/system/cpu/vulnerabilities/l1tf
        /sys/devices/system/cpu/vulnerabilities/mds
@@ -23,3 +23,4 @@ are configurable at compile, boot or run time.
    gather_data_sampling
    reg-file-data-sampling
    rsb
+   indirect-target-selection
Documentation/admin-guide/hw-vuln/indirect-target-selection.rst — new file, 168 lines
@@ -0,0 +1,168 @@ (entire file added)

.. SPDX-License-Identifier: GPL-2.0

Indirect Target Selection (ITS)
===============================

ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.

ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).

Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
  predicted with an unintended target corresponding to a branch in the guest.

- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
  gadgets.

- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
  branches may still be predicted with targets corresponding to direct branches
  executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
  should be available via distro updates. Alternatively microcode can be
  obtained from Intel's github repository [#f1]_.

Affected CPUs
-------------
Below is the list of ITS affected CPUs [#f2]_ [#f3]_:

   ======================== ============ ==================== ===============
   Common name              Family_Model eIBRS                Intra-mode BTI
                                         Guest/Host Isolation
   ======================== ============ ==================== ===============
   SKYLAKE_X (step >= 6)    06_55H       Affected             Affected
   ICELAKE_X                06_6AH       Not affected         Affected
   ICELAKE_D                06_6CH       Not affected         Affected
   ICELAKE_L                06_7EH       Not affected         Affected
   TIGERLAKE_L              06_8CH       Not affected         Affected
   TIGERLAKE                06_8DH       Not affected         Affected
   KABYLAKE_L (step >= 12)  06_8EH       Affected             Affected
   KABYLAKE (step >= 13)    06_9EH       Affected             Affected
   COMETLAKE                06_A5H       Affected             Affected
   COMETLAKE_L              06_A6H       Affected             Affected
   ROCKETLAKE               06_A7H       Not affected         Affected
   ======================== ============ ==================== ===============

- All affected CPUs enumerate the Enhanced IBRS feature.
- IBPB isolation is affected on all ITS-affected CPUs and needs a microcode
  update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL, which was introduced in Golden
  Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
  host's affected status.
- Intel Atom CPUs are not affected by ITS.

Mitigation
----------
As only the indirect branches and RETs that have their last byte of instruction
in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
the mitigation is to not allow indirect branches in the lower half.
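For illustration only (this is not the kernel's implementation), the
"lower half" test described above can be sketched in C, assuming 64-byte
cachelines::

    #include <stdbool.h>
    #include <stdint.h>

    /* Both constants are illustrative assumptions, not kernel values. */
    static bool its_unsafe_branch(uint64_t insn_addr, unsigned int insn_len)
    {
            uint64_t last_byte = insn_addr + insn_len - 1;

            /* Vulnerable iff the instruction ends in the first 32 bytes. */
            return (last_byte % 64) < 32;
    }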
This is achieved by relying on existing retpoline support in the kernel and in
compilers. ITS-vulnerable retpoline sites are runtime-patched to point to newly
added ITS-safe thunks. These safe thunks consist of an indirect branch in the
second half of the cacheline. Not all retpoline sites are patched to thunks; if
a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
indirect branch.

Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe thunks, each vulnerable site is
replaced with a new thunk, such that they get a unique address. This could
improve the branch prediction accuracy. Also, it is a defense-in-depth measure
against aliasing.

Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
in future this can be changed to use dynamic thunks.

All vulnerable RETs are replaced with a static thunk; they do not use dynamic
thunks. This is because RETs mostly get their prediction from the RSB, which
does not depend on the source address. RETs that underflow the RSB may benefit
from dynamic thunks, but RETs significantly outnumber indirect branches, and
any benefit from a unique source address could be outweighed by the increased
icache footprint and iTLB pressure.
Retpoline
~~~~~~~~~
The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
to safe thunks, unless the user requested the RSB-stuffing mitigation.

RSB Stuffing
~~~~~~~~~~~~
RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
attacks; it also mitigates RETs that are vulnerable to ITS.

Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy the ITS mitigation by default, irrespective of eIBRS
enumeration and Family/Model of the guest. This is because the eIBRS feature
could be hidden from a guest. One exception to this is when a guest enumerates
BHI_DIS_S, which indicates that the guest is running on an unaffected host.

To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
is not set by any hardware, but is **intended for VMMs to synthesize** it for
guests as per the host's affected status.

Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:

   ======== ===================================================================
   on       (default) Deploy the "Aligned branch/return thunks" mitigation.
            If spectre_v2 mitigation enables retpoline, aligned-thunks are only
            deployed for the affected RET instructions. Retpoline mitigates
            indirect branches.

   off      Disable ITS mitigation.

   vmexit   Equivalent to "=on" if the CPU is affected by the guest/host
            isolation part of ITS. Otherwise, mitigation is not deployed. This
            option is useful when host userspace is not in the threat model,
            and only attacks from guest to host are considered.

   stuff    Deploy RSB-fill mitigation when retpoline is also deployed.
            Otherwise, deploy the default mitigation. When retpoline mitigation
            is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
            ITS.

   force    Force the ITS bug and deploy the default mitigation.
   ======== ===================================================================

Sysfs reporting
---------------

The sysfs file showing ITS mitigation status is:

   /sys/devices/system/cpu/vulnerabilities/indirect_target_selection

Note, microcode mitigation status is not reported in this file.

The possible values in this file are:

.. list-table::

   * - Not affected
     - The processor is not vulnerable.
   * - Vulnerable
     - System is vulnerable and no mitigation has been applied.
   * - Vulnerable, KVM: Not affected
     - System is vulnerable to intra-mode BTI, but not affected by eIBRS
       guest/host isolation.
   * - Mitigation: Aligned branch/return thunks
     - The mitigation is enabled; affected indirect branches and RETs are
       relocated to safe thunks.
   * - Mitigation: Retpolines, Stuffing RSB
     - The mitigation is enabled using retpoline and RSB stuffing.

References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files

.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html

.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
@@ -2202,6 +2202,23 @@
 			different crypto accelerators. This option can be used
 			to achieve best performance for particular HW.

+	indirect_target_selection= [X86,Intel] Mitigation control for Indirect
+			Target Selection (ITS) bug in Intel CPUs. Updated
+			microcode is also required for a fix in IBPB.
+
+			on:     Enable mitigation (default).
+			off:    Disable mitigation.
+			force:  Force the ITS bug and deploy default
+				mitigation.
+			vmexit: Only deploy mitigation if CPU is affected by
+				guest/host isolation part of ITS.
+			stuff:  Deploy RSB-fill mitigation when retpoline is
+				also deployed. Otherwise, deploy the default
+				mitigation.
+
+			For details see:
+			Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+
 	init=		[KNL]
 			Format: <full_path>
 			Run specified binary instead of /sbin/init as init

@@ -3693,6 +3710,7 @@
 			expose users to several CPU vulnerabilities.
 			Equivalent to: if nokaslr then kpti=0 [ARM64]
 				       gather_data_sampling=off [X86]
+				       indirect_target_selection=off [X86]
 				       kvm.nx_huge_pages=off [X86]
 				       l1tf=off [X86]
 				       mds=off [X86]
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek's Keypad Controller

 maintainers:
-  - Mattijs Korpershoek <mkorpershoek@baylibre.com>
+  - Mattijs Korpershoek <mkorpershoek@kernel.org>

 allOf:
   - $ref: /schemas/input/matrix-keymap.yaml#
@@ -46,6 +46,21 @@ The kernel embeds the building user and host names in
 `KBUILD_BUILD_USER and KBUILD_BUILD_HOST`_ variables. If you are
 building from a git commit, you could use its committer address.

+Absolute filenames
+------------------
+
+When the kernel is built out-of-tree, debug information may include
+absolute filenames for the source files. This must be overridden by
+including the ``-fdebug-prefix-map`` option in the `KCFLAGS`_ variable.
+
+Depending on the compiler used, the ``__FILE__`` macro may also expand
+to an absolute filename in an out-of-tree build. Kbuild automatically
+uses the ``-fmacro-prefix-map`` option to prevent this, if it is
+supported.
+
+The Reproducible Builds web site has more information about these
+`prefix-map options`_.
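A minimal C illustration of the ``__FILE__`` behaviour described above
(the invocation is an assumed example, not taken from Kbuild)::

    /* demo.c — print what __FILE__ expanded to.
     *   gcc /path/to/src/demo.c
     *       -> __FILE__ may be an absolute path
     *   gcc -fmacro-prefix-map=/path/to/src/= /path/to/src/demo.c
     *       -> the configured prefix is stripped from __FILE__
     */
    #include <stdio.h>

    int main(void)
    {
            printf("%s\n", __FILE__);
            return 0;
    }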
Generated files in source packages
----------------------------------

@@ -116,5 +131,7 @@ See ``scripts/setlocalversion`` for details.

 .. _KBUILD_BUILD_TIMESTAMP: kbuild.html#kbuild-build-timestamp
 .. _KBUILD_BUILD_USER and KBUILD_BUILD_HOST: kbuild.html#kbuild-build-user-kbuild-build-host
+.. _KCFLAGS: kbuild.html#kcflags
+.. _prefix-map options: https://reproducible-builds.org/docs/build-path/
 .. _Reproducible Builds project: https://reproducible-builds.org/
 .. _SOURCE_DATE_EPOCH: https://reproducible-builds.org/docs/source-date-epoch/
@@ -2017,7 +2017,8 @@ attribute-sets:
       attributes:
         -
           name: act
-          type: nest
+          type: indexed-array
+          sub-type: nest
           nested-attributes: tc-act-attrs
         -
           name: police

@@ -2250,7 +2251,8 @@ attribute-sets:
       attributes:
        -
          name: act
-         type: nest
+         type: indexed-array
+         sub-type: nest
          nested-attributes: tc-act-attrs
        -
          name: police

@@ -2745,7 +2747,7 @@ attribute-sets:
          type: u16
          byte-order: big-endian
        -
-         name: key-l2-tpv3-sid
+         name: key-l2tpv3-sid
          type: u32
          byte-order: big-endian
        -

@@ -3504,7 +3506,7 @@ attribute-sets:
          name: rate64
          type: u64
        -
-         name: prate4
+         name: prate64
          type: u64
        -
          name: burst
@@ -811,11 +811,9 @@ Documentation/devicetree/bindings/ptp/timestamper.txt for more details.
 3.2.4 Other caveats for MAC drivers
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Stacked PHCs, especially DSA (but not only) - since that doesn't require any
-modification to MAC drivers, so it is more difficult to ensure correctness of
-all possible code paths - is that they uncover bugs which were impossible to
-trigger before the existence of stacked PTP clocks. One example has to do with
-this line of code, already presented earlier::
+The use of stacked PHCs may uncover MAC driver bugs which were impossible to
+trigger without them. One example has to do with this line of code, already
+presented earlier::

     skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
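A hedged sketch of the caveat: a MAC driver should only set this flag
when it is actually the layer that will deliver the timestamp, since
with stacked PHCs (e.g. DSA) another timestamper may be in charge.
mac_will_deliver_tstamp() below is a hypothetical helper, not a kernel
API, and this is not the documented fix:

    /* Sketch only — illustrates the intent, not this patch. */
    if (mac_will_deliver_tstamp(priv, skb))
            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;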
MAINTAINERS — 70 changed lines
@@ -11229,7 +11229,6 @@ S: Maintained
 F:	drivers/i2c/busses/i2c-cht-wc.c

 I2C/SMBUS ISMT DRIVER
 M:	Seth Heasley <seth.heasley@intel.com>
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-i2c@vger.kernel.org
 F:	Documentation/i2c/busses/i2c-ismt.rst

@@ -15065,7 +15064,7 @@
 F:	drivers/media/platform/mediatek/jpeg/

 MEDIATEK KEYPAD DRIVER
-M:	Mattijs Korpershoek <mkorpershoek@baylibre.com>
+M:	Mattijs Korpershoek <mkorpershoek@kernel.org>
 S:	Supported
 F:	Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml
 F:	drivers/input/keyboard/mt6779-keypad.c

@@ -15488,24 +15487,45 @@ F: Documentation/mm/
 F:	include/linux/gfp.h
 F:	include/linux/gfp_types.h
 F:	include/linux/memfd.h
 F:	include/linux/memory.h
 F:	include/linux/memory_hotplug.h
 F:	include/linux/memory-tiers.h
 F:	include/linux/mempolicy.h
 F:	include/linux/mempool.h
 F:	include/linux/memremap.h
 F:	include/linux/mm.h
 F:	include/linux/mm_*.h
 F:	include/linux/mmzone.h
 F:	include/linux/mmu_notifier.h
 F:	include/linux/pagewalk.h
 F:	include/linux/rmap.h
 F:	include/trace/events/ksm.h
 F:	mm/
 F:	tools/mm/
 F:	tools/testing/selftests/mm/
 N:	include/linux/page[-_]*

+MEMORY MANAGEMENT - CORE
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Mike Rapoport <rppt@kernel.org>
+R:	Suren Baghdasaryan <surenb@google.com>
+R:	Michal Hocko <mhocko@suse.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	include/linux/memory.h
+F:	include/linux/mm.h
+F:	include/linux/mm_*.h
+F:	include/linux/mmdebug.h
+F:	include/linux/pagewalk.h
+F:	mm/Kconfig
+F:	mm/debug.c
+F:	mm/init-mm.c
+F:	mm/memory.c
+F:	mm/pagewalk.c
+F:	mm/util.c
+
 MEMORY MANAGEMENT - EXECMEM
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>

@@ -15539,6 +15559,19 @@ F: mm/page_alloc.c
 F:	include/linux/gfp.h
 F:	include/linux/compaction.h

+MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+M:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Rik van Riel <riel@surriel.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Harry Yoo <harry.yoo@oracle.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	include/linux/rmap.h
+F:	mm/rmap.c
+
 MEMORY MANAGEMENT - SECRETMEM
 M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Mike Rapoport <rppt@kernel.org>

@@ -15547,6 +15580,30 @@ S: Maintained
 F:	include/linux/secretmem.h
 F:	mm/secretmem.c

+MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	David Hildenbrand <david@redhat.com>
+R:	Zi Yan <ziy@nvidia.com>
+R:	Baolin Wang <baolin.wang@linux.alibaba.com>
+R:	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Nico Pache <npache@redhat.com>
+R:	Ryan Roberts <ryan.roberts@arm.com>
+R:	Dev Jain <dev.jain@arm.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	Documentation/admin-guide/mm/transhuge.rst
+F:	include/linux/huge_mm.h
+F:	include/linux/khugepaged.h
+F:	include/trace/events/huge_memory.h
+F:	mm/huge_memory.c
+F:	mm/khugepaged.c
+F:	tools/testing/selftests/mm/khugepaged.c
+F:	tools/testing/selftests/mm/split_huge_page_test.c
+F:	tools/testing/selftests/mm/transhuge-stress.c
+
 MEMORY MANAGEMENT - USERFAULTFD
 M:	Andrew Morton <akpm@linux-foundation.org>
 R:	Peter Xu <peterx@redhat.com>

@@ -22870,7 +22927,6 @@ F: drivers/accessibility/speakup/

 SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT
 M:	Viresh Kumar <vireshk@kernel.org>
 M:	Shiraz Hashim <shiraz.linux.kernel@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	soc@lists.linux.dev
 S:	Maintained
Makefile — 5 changed lines
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*

@@ -1068,8 +1068,7 @@ KBUILD_CFLAGS += -fno-builtin-wcslen

 # change __FILE__ to the relative path to the source directory
 ifdef building_out_of_srctree
-KBUILD_CPPFLAGS += $(call cc-option,-ffile-prefix-map=$(srcroot)/=)
-KBUILD_RUSTFLAGS += --remap-path-prefix=$(srcroot)/=
+KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srcroot)/=)
 endif

 # include additional Makefiles when needed
@@ -451,7 +451,7 @@
 pwm_ef: pwm@86c0 {
 	compatible = "amlogic,meson8-pwm-v2";
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 	reg = <0x86c0 0x10>;

@@ -705,7 +705,7 @@
 &pwm_ab {
 	compatible = "amlogic,meson8-pwm-v2";
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -713,7 +713,7 @@
 &pwm_cd {
 	compatible = "amlogic,meson8-pwm-v2";
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -406,7 +406,7 @@
 	compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
 	reg = <0x86c0 0x10>;
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 	#pwm-cells = <3>;

@@ -680,7 +680,7 @@
 &pwm_ab {
 	compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -688,7 +688,7 @@
 &pwm_cd {
 	compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+		 <0>, /* unknown/untested, the datasheet calls it "Video PLL" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };
@@ -151,7 +151,7 @@
 	al,msi-num-spis = <160>;
 };

-io-fabric@fc000000 {
+io-bus@fc000000 {
 	compatible = "simple-bus";
 	#address-cells = <1>;
 	#size-cells = <1>;

@@ -361,7 +361,7 @@
 	interrupt-parent = <&gic>;
 };

-io-fabric@fc000000 {
+io-bus@fc000000 {
 	compatible = "simple-bus";
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -2313,7 +2313,7 @@
 	             "amlogic,meson8-pwm-v2";
 	reg = <0x0 0x19000 0x0 0x20>;
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 	#pwm-cells = <3>;

@@ -2325,7 +2325,7 @@
 	             "amlogic,meson8-pwm-v2";
 	reg = <0x0 0x1a000 0x0 0x20>;
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 	#pwm-cells = <3>;

@@ -2337,7 +2337,7 @@
 	             "amlogic,meson8-pwm-v2";
 	reg = <0x0 0x1b000 0x0 0x20>;
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 	#pwm-cells = <3>;
@@ -116,6 +116,10 @@
 	status = "okay";
 };

+&clkc_audio {
+	status = "okay";
+};
+
 &frddr_a {
 	status = "okay";
 };
@@ -741,7 +741,7 @@

 &pwm_ab {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -752,14 +752,14 @@

 &pwm_cd {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

 &pwm_ef {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -811,7 +811,7 @@

 &pwm_ab {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

@@ -822,14 +822,14 @@

 &pwm_cd {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };

 &pwm_ef {
 	clocks = <&xtal>,
-		 <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+		 <0>, /* unknown/untested, the datasheet calls it "vid_pll" */
 		 <&clkc CLKID_FCLK_DIV4>,
 		 <&clkc CLKID_FCLK_DIV3>;
 };
@@ -77,6 +77,16 @@
 	};
 };

+/*
+ * The driver depends on boot loader initialized state which resets when this
+ * power-domain is powered off. This happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+	apple,always-on;
+};
+
 &display_dfr {
 	status = "okay";
 };

@@ -40,6 +40,16 @@
 	};
 };

+/*
+ * The driver depends on boot loader initialized state which resets when this
+ * power-domain is powered off. This happens on suspend or when the driver is
+ * missing during boot. Mark the domain as always on until the driver can
+ * handle this.
+ */
+&ps_dispdfr_be {
+	apple,always-on;
+};
+
 &display_dfr {
 	status = "okay";
 };
@@ -88,3 +88,5 @@
 			  <0>, <0>, <400000000>,
 			  <1039500000>;
 };
+
+/delete-node/ &{noc_opp_table/opp-1000000000};
@@ -35,7 +35,6 @@
 		      <0x1 0x00000000 0 0xc0000000>;
 	};

-
 	reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
 		compatible = "regulator-fixed";
 		regulator-name = "VSD_3V3";

@@ -46,6 +45,16 @@
 		startup-delay-us = <100>;
 		off-on-delay-us = <12000>;
 	};
+
+	reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
+		compatible = "regulator-gpio";
+		regulator-name = "VSD_VSEL";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+		gpios = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+		states = <3300000 0x0 1800000 0x1>;
+		vin-supply = <&ldo5>;
+	};
 };

 &A53_0 {

@@ -205,6 +214,7 @@
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
 	cd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_usdhc2_vmmc>;
+	vqmmc-supply = <&reg_usdhc2_vqmmc>;
 	bus-width = <4>;
 	status = "okay";
 };
@@ -1645,6 +1645,12 @@
 	opp-hz = /bits/ 64 <200000000>;
 };

+/* Nominal drive mode maximum */
+opp-800000000 {
+	opp-hz = /bits/ 64 <800000000>;
+};
+
+/* Overdrive mode maximum */
 opp-1000000000 {
 	opp-hz = /bits/ 64 <1000000000>;
 };
@@ -31,7 +31,7 @@
 };

 vcc3v3_btreg: vcc3v3-btreg {
-	compatible = "regulator-gpio";
+	compatible = "regulator-fixed";
 	enable-active-high;
 	pinctrl-names = "default";
 	pinctrl-0 = <&bt_enable_h>;

@@ -39,7 +39,6 @@
 	regulator-min-microvolt = <3300000>;
 	regulator-max-microvolt = <3300000>;
 	regulator-always-on;
-	states = <3300000 0x0>;
 };

 vcc3v3_rf_aux_mod: regulator-vcc3v3-rf-aux-mod {
@@ -26,5 +26,5 @@
 };

 &vcc3v3_btreg {
-	enable-gpios = <&gpio1 RK_PC3 GPIO_ACTIVE_HIGH>;
+	gpios = <&gpio1 RK_PC3 GPIO_ACTIVE_HIGH>;
 };

@@ -39,5 +39,5 @@
 };

 &vcc3v3_btreg {
-	enable-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+	gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
 };
@@ -43,7 +43,7 @@
 sdio_pwrseq: sdio-pwrseq {
 	compatible = "mmc-pwrseq-simple";
 	clocks = <&rk808 1>;
-	clock-names = "lpo";
+	clock-names = "ext_clock";
 	pinctrl-names = "default";
 	pinctrl-0 = <&wifi_enable_h>;
 	reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
@@ -775,7 +775,7 @@
 	rockchip,default-sample-phase = <90>;
 	status = "okay";

-	sdio-wifi@1 {
+	wifi@1 {
 		compatible = "brcm,bcm4329-fmac";
 		reg = <1>;
 		interrupt-parent = <&gpio2>;
@@ -619,6 +619,8 @@
 	bus-width = <8>;
 	max-frequency = <200000000>;
 	non-removable;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
 	status = "okay";
 };
@@ -610,7 +610,7 @@
 	reg = <0x51>;
 	clock-output-names = "hym8563";
 	interrupt-parent = <&gpio0>;
-	interrupts = <RK_PB0 IRQ_TYPE_LEVEL_LOW>;
+	interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&hym8563_int>;
 	wakeup-source;
@@ -222,6 +222,10 @@
 		compatible = "realtek,rt5616";
 		reg = <0x1b>;
 		#sound-dai-cells = <0>;
+		assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
+		assigned-clock-rates = <12288000>;
+		clocks = <&cru I2S0_8CH_MCLKOUT>;
+		clock-names = "mclk";
 	};
 };
@@ -214,6 +214,8 @@
 };

 &package_thermal {
+	polling-delay = <1000>;
+
 	trips {
 		package_active1: trip-active1 {
 			temperature = <45000>;
@@ -11,20 +11,15 @@
 	compatible = "operating-points-v2";
 	opp-shared;

-	opp-1416000000 {
-		opp-hz = /bits/ 64 <1416000000>;
+	opp-1200000000 {
+		opp-hz = /bits/ 64 <1200000000>;
 		opp-microvolt = <750000 750000 950000>;
 		clock-latency-ns = <40000>;
 		opp-suspend;
 	};
-	opp-1608000000 {
-		opp-hz = /bits/ 64 <1608000000>;
-		opp-microvolt = <887500 887500 950000>;
-		clock-latency-ns = <40000>;
-	};
-	opp-1704000000 {
-		opp-hz = /bits/ 64 <1704000000>;
-		opp-microvolt = <937500 937500 950000>;
+	opp-1296000000 {
+		opp-hz = /bits/ 64 <1296000000>;
+		opp-microvolt = <775000 775000 950000>;
 		clock-latency-ns = <40000>;
 	};
 };

@@ -33,9 +28,14 @@
 	compatible = "operating-points-v2";
 	opp-shared;

+	opp-1200000000 {
+		opp-hz = /bits/ 64 <1200000000>;
+		opp-microvolt = <750000 750000 950000>;
+		clock-latency-ns = <40000>;
+	};
 	opp-1416000000 {
 		opp-hz = /bits/ 64 <1416000000>;
-		opp-microvolt = <750000 750000 950000>;
+		opp-microvolt = <762500 762500 950000>;
 		clock-latency-ns = <40000>;
 	};
 	opp-1608000000 {

@@ -43,25 +43,20 @@
 		opp-microvolt = <787500 787500 950000>;
 		clock-latency-ns = <40000>;
 	};
-	opp-1800000000 {
-		opp-hz = /bits/ 64 <1800000000>;
-		opp-microvolt = <875000 875000 950000>;
-		clock-latency-ns = <40000>;
-	};
-	opp-2016000000 {
-		opp-hz = /bits/ 64 <2016000000>;
-		opp-microvolt = <950000 950000 950000>;
-		clock-latency-ns = <40000>;
-	};
 };

 cluster2_opp_table: opp-table-cluster2 {
 	compatible = "operating-points-v2";
 	opp-shared;

+	opp-1200000000 {
+		opp-hz = /bits/ 64 <1200000000>;
+		opp-microvolt = <750000 750000 950000>;
+		clock-latency-ns = <40000>;
+	};
 	opp-1416000000 {
 		opp-hz = /bits/ 64 <1416000000>;
-		opp-microvolt = <750000 750000 950000>;
+		opp-microvolt = <762500 762500 950000>;
 		clock-latency-ns = <40000>;
 	};
 	opp-1608000000 {

@@ -69,16 +64,6 @@
 		opp-microvolt = <787500 787500 950000>;
 		clock-latency-ns = <40000>;
 	};
-	opp-1800000000 {
-		opp-hz = /bits/ 64 <1800000000>;
-		opp-microvolt = <875000 875000 950000>;
-		clock-latency-ns = <40000>;
-	};
-	opp-2016000000 {
-		opp-hz = /bits/ 64 <2016000000>;
-		opp-microvolt = <950000 950000 950000>;
-		clock-latency-ns = <40000>;
-	};
 };

 gpu_opp_table: opp-table {

@@ -104,10 +89,6 @@
 		opp-hz = /bits/ 64 <700000000>;
 		opp-microvolt = <750000 750000 850000>;
 	};
-	opp-850000000 {
-		opp-hz = /bits/ 64 <800000000>;
-		opp-microvolt = <787500 787500 850000>;
-	};
 };
@@ -81,6 +81,7 @@
 #define ARM_CPU_PART_CORTEX_A78AE	0xD42
 #define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
+#define ARM_CPU_PART_CORTEX_X1C		0xD4C
 #define ARM_CPU_PART_CORTEX_A520	0xD80
 #define ARM_CPU_PART_CORTEX_A710	0xD47
 #define ARM_CPU_PART_CORTEX_A715	0xD4D

@@ -168,6 +169,7 @@
 #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1		MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_X1C		MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
 #define MIDR_CORTEX_A520	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
 #define MIDR_CORTEX_A710	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
 #define MIDR_CORTEX_A715	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
@@ -52,7 +52,7 @@
 	mrs	x0, id_aa64mmfr1_el1
 	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x0, .Lskip_hcrx_\@
-	mov_q	x0, HCRX_HOST_FLAGS
+	mov_q	x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)

 	/* Enable GCS if supported */
 	mrs_s	x1, SYS_ID_AA64PFR1_EL1
@@ -706,6 +706,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
 }
 #endif
 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
 u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
 			 enum aarch64_insn_system_register sysreg);
@@ -100,9 +100,8 @@
 			 HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)

-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 #define MPAMHCR_HOST_FLAGS	0

 /* TCR_EL2 Registers bits */
@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);

 enum mitigation_state arm64_get_spectre_bhb_state(void);
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+extern bool __nospectre_bhb;
+u8 get_spectre_bhb_loop_value(void);
+bool is_spectre_bhb_fw_mitigated(void);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
@@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 	return res;
 }

+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
+{
+	const struct vdso_time_data *ret = &vdso_u_time_data;
+
+	/* Work around invalid absolute relocations */
+	OPTIMIZER_HIDE_VAR(ret);
+
+	return ret;
+}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
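For context, OPTIMIZER_HIDE_VAR() forces the pointer through an empty asm
so the compiler can no longer prove it equals &vdso_u_time_data, which is
what suppresses the problematic absolute relocation. A sketch of the
commonly cited form of the macro (the authoritative definition lives in
include/linux/compiler.h; treat this as an assumption, not a quote):

	/* Sketch; see include/linux/compiler.h for the real definition. */
	#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))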
@@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC

 DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

-bool arm64_use_ng_mappings = false;
+/*
+ * arm64_use_ng_mappings must be placed in the .data section, otherwise it
+ * ends up in the .bss section where it is initialized in early_map_kernel()
+ * after the MMU (with the idmap) was enabled. create_init_idmap() - which
+ * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
+ * may end up generating an incorrect idmap page table attributes.
+ */
+bool arm64_use_ng_mappings __read_mostly = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);

 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
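An illustration of the section placement the comment relies on (attribute
spelling per include/linux/cache.h; this snippet is an assumption-laden
sketch, not kernel code):

	#include <stdbool.h>

	/* Zero-initialized globals normally land in .bss and are only
	 * zeroed at runtime; an explicit data-section placement makes the
	 * value valid as soon as the image is mapped. */
	#define __read_mostly __attribute__((__section__(".data..read_mostly")))

	bool in_bss = false;                    /* .bss */
	bool in_data __read_mostly = false;     /* .data..read_mostly */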
@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void)
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
 		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),

@@ -999,6 +1000,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
 	return true;
 }

+u8 get_spectre_bhb_loop_value(void)
+{
+	return max_bhb_k;
+}
+
 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 {
 	const char *v = arm64_get_bp_hardening_vector(slot);

@@ -1016,7 +1022,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 	isb();
 }

-static bool __read_mostly __nospectre_bhb;
+bool __read_mostly __nospectre_bhb;
 static int __init parse_spectre_bhb_param(char *str)
 {
 	__nospectre_bhb = true;

@@ -1094,6 +1100,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 	update_mitigation_state(&spectre_bhb_state, state);
 }

+bool is_spectre_bhb_fw_mitigated(void)
+{
+	return test_bit(BHB_FW, &system_bhb_mitigations);
+}
+
 /* Patched to NOP when enabled */
 void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
 						      __le32 *origptr,
@@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void)

 static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);

@@ -245,11 +247,8 @@
 	 * EL1 instead of being trapped to EL2.
 	 */
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
 		write_sysreg(0, pmselr_el0);
-
-		hctxt = host_data_ptr(host_ctxt);
 		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
 		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);

@@ -269,6 +268,7 @@
 			hcrx &= ~clr;
 		}

+		ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
 		write_sysreg_s(hcrx, SYS_HCRX_EL2);
 	}

@@ -278,19 +278,18 @@

 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
 	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);

 	write_sysreg(0, hstr_el2);
 	if (system_supports_pmuv3()) {
-		struct kvm_cpu_context *hctxt;
-
-		hctxt = host_data_ptr(host_ctxt);
 		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
 		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
 	}

 	if (cpus_have_final_cap(ARM64_HAS_HCX))
-		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+		write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2);

 	__deactivate_traps_hfgxtr(vcpu);
 	__deactivate_traps_mpam();
@@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
 {
 	int ret;

-	if (!addr_is_memory(addr))
+	if (!range_is_memory(addr, addr + size))
 		return -EPERM;

 	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
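Why the range check matters, as a hedged sketch (the helper names mirror
the diff, but the page-granular walk is an assumption about the shape of
such a check, not this tree's implementation):

	/* A range can start in memory yet stray out of it; checking only
	 * the first address misses that case. */
	static bool range_is_memory_sketch(u64 start, u64 end)
	{
		u64 p;

		for (p = start; p < end; p += PAGE_SIZE)
			if (!addr_is_memory(p))
				return false;
		return true;
	}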
@@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void)
 	/*
 	 * To check whether we have a MMIO-based (GICv2 compatible)
 	 * CPU interface, we need to disable the system register
-	 * view. To do that safely, we have to prevent any interrupt
-	 * from firing (which would be deadly).
+	 * view.
 	 *
-	 * Note that this only makes sense on VHE, as interrupts are
-	 * already masked for nVHE as part of the exception entry to
-	 * EL2.
-	 */
-	if (has_vhe())
-		flags = local_daif_save();
-
-	/*
 	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
 	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
 	 * interrupt overrides must be set. You've got to love this.
+	 *
+	 * As we always run VHE with HCR_xMO set, no extra xMO
+	 * manipulation is required in that case.
+	 *
+	 * To safely disable SRE, we have to prevent any interrupt
+	 * from firing (which would be deadly). This only makes sense
+	 * on VHE, as interrupts are already masked for nVHE as part
+	 * of the exception entry to EL2.
 	 */
-	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
-	isb();
+	if (has_vhe()) {
+		flags = local_daif_save();
+	} else {
+		sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+		isb();
+	}

 	write_gicreg(0, ICC_SRE_EL1);
 	isb();

@@ -453,11 +457,13 @@

 	write_gicreg(sre, ICC_SRE_EL1);
 	isb();
-	sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
-	isb();

-	if (has_vhe())
+	if (has_vhe()) {
 		local_daif_restore(flags);
+	} else {
+		sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+		isb();
+	}

 	val  = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
 	val |= read_gicreg(ICH_VTR_EL2);
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}

+	if (!is_protected_kvm_enabled())
+		memcache = &vcpu->arch.mmu_page_cache;
+	else
+		memcache = &vcpu->arch.pkvm_memcache;
+
 	/*
 	 * Permission faults just need to update the existing leaf entry,
 	 * and so normally don't require allocations from the memcache. The

@@ -1510,13 +1515,11 @@
 	if (!fault_is_perm || (logging_active && write_fault)) {
 		int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);

-		if (!is_protected_kvm_enabled()) {
-			memcache = &vcpu->arch.mmu_page_cache;
+		if (!is_protected_kvm_enabled())
 			ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
-		} else {
-			memcache = &vcpu->arch.pkvm_memcache;
+		else
 			ret = topup_hyp_memcache(memcache, min_pages);
-		}

 		if (ret)
 			return ret;
 	}
@@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
 		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

+	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
+	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
+	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
+	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
+		return -EINVAL;
+
 	return set_id_reg(vcpu, rd, user_val);
 }
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
  */
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/printk.h>

@@ -1500,43 +1501,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
 }

+static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
+{
+	switch (type) {
+	case AARCH64_INSN_MB_SY:
+		return 0xf;
+	case AARCH64_INSN_MB_ST:
+		return 0xe;
+	case AARCH64_INSN_MB_LD:
+		return 0xd;
+	case AARCH64_INSN_MB_ISH:
+		return 0xb;
+	case AARCH64_INSN_MB_ISHST:
+		return 0xa;
+	case AARCH64_INSN_MB_ISHLD:
+		return 0x9;
+	case AARCH64_INSN_MB_NSH:
+		return 0x7;
+	case AARCH64_INSN_MB_NSHST:
+		return 0x6;
+	case AARCH64_INSN_MB_NSHLD:
+		return 0x5;
+	default:
+		pr_err("%s: unknown barrier type %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+}
+
 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 {
 	u32 opt;
 	u32 insn;

-	switch (type) {
-	case AARCH64_INSN_MB_SY:
-		opt = 0xf;
-		break;
-	case AARCH64_INSN_MB_ST:
-		opt = 0xe;
-		break;
-	case AARCH64_INSN_MB_LD:
-		opt = 0xd;
-		break;
-	case AARCH64_INSN_MB_ISH:
-		opt = 0xb;
-		break;
-	case AARCH64_INSN_MB_ISHST:
-		opt = 0xa;
-		break;
-	case AARCH64_INSN_MB_ISHLD:
-		opt = 0x9;
-		break;
-	case AARCH64_INSN_MB_NSH:
-		opt = 0x7;
-		break;
-	case AARCH64_INSN_MB_NSHST:
-		opt = 0x6;
-		break;
-	case AARCH64_INSN_MB_NSHLD:
-		opt = 0x5;
-		break;
-	default:
-		pr_err("%s: unknown dmb type %d\n", __func__, type);
+	opt = __get_barrier_crm_val(type);
+	if (opt == AARCH64_BREAK_FAULT)
 		return AARCH64_BREAK_FAULT;
-	}

 	insn = aarch64_insn_get_dmb_value();
 	insn &= ~GENMASK(11, 8);

@@ -1545,6 +1544,21 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 	return insn;
 }

+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
+{
+	u32 opt, insn;
+
+	opt = __get_barrier_crm_val(type);
+	if (opt == AARCH64_BREAK_FAULT)
+		return AARCH64_BREAK_FAULT;
+
+	insn = aarch64_insn_get_dsb_base_value();
+	insn &= ~GENMASK(11, 8);
+	insn |= (opt << 8);
+
+	return insn;
+}
+
 u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
 			 enum aarch64_insn_system_register sysreg)
 {
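Hypothetical usage of the new helper (emit() and ctx follow the BPF JIT
in the next file; this snippet is illustrative, not part of the diff):

	u32 insn = aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH);

	if (insn != AARCH64_BREAK_FAULT)
		emit(insn, ctx);	/* encodes a "dsb ish" */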
@@ -7,6 +7,7 @@

 #define pr_fmt(fmt) "bpf_jit: " fmt

+#include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>

@@ -17,6 +18,7 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/text-patching.h>

@@ -939,7 +941,51 @@ static void build_plt(struct jit_ctx *ctx)
 	plt->target = (u64)&dummy_tramp;
 }

-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+	u8 k = get_spectre_bhb_loop_value();
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+	    cpu_mitigations_off() || __nospectre_bhb ||
+	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+		return;
+
+	if (capable(CAP_SYS_ADMIN))
+		return;
+
+	if (supports_clearbhb(SCOPE_SYSTEM)) {
+		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+		return;
+	}
+
+	if (k) {
+		emit_a64_mov_i64(r1, k, ctx);
+		emit(A64_B(1), ctx);
+		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+		emit(A64_B_(A64_COND_NE, -2), ctx);
+		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+		emit(aarch64_insn_get_isb_value(), ctx);
+	}
+
+	if (is_spectre_bhb_fw_mitigated()) {
+		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+		switch (arm_smccc_1_1_get_conduit()) {
+		case SMCCC_CONDUIT_HVC:
+			emit(aarch64_insn_get_hvc_value(), ctx);
+			break;
+		case SMCCC_CONDUIT_SMC:
+			emit(aarch64_insn_get_smc_value(), ctx);
+			break;
+		default:
+			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+		}
+	}
+}
+
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 {
 	const u8 r0 = bpf2a64[BPF_REG_0];
 	const u8 ptr = bpf2a64[TCCNT_PTR];

@@ -952,10 +998,13 @@

 	emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);

+	if (was_classic)
+		build_bhb_mitigation(ctx);
+
 	/* Restore FP/LR registers */
 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

-	/* Set return value */
+	/* Move the return value from bpf:r0 (aka x7) to x0 */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);

 	/* Authenticate lr */

@@ -1898,7 +1947,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}

 	ctx.epilogue_offset = ctx.idx;
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);

 	extable_align = __alignof__(struct exception_table_entry);

@@ -1961,7 +2010,7 @@ skip_init_ctx:
 		goto out_free_hdr;
 	}

-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);

 	/* Extra pass to validate JITed code. */
|
|||
#include <linux/linkage.h>
|
||||
|
||||
extern void (*cpu_wait)(void);
|
||||
extern void r4k_wait(void);
|
||||
extern asmlinkage void __r4k_wait(void);
|
||||
extern asmlinkage void r4k_wait(void);
|
||||
extern void r4k_wait_irqoff(void);
|
||||
|
||||
static inline int using_rollback_handler(void)
|
||||
static inline int using_skipover_handler(void)
|
||||
{
|
||||
return cpu_wait == r4k_wait;
|
||||
}
|
||||
|
|
|
@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
|
|||
|
||||
/* Query offset/name of register from its name/offset */
|
||||
extern int regs_query_register_offset(const char *name);
|
||||
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
|
||||
#define MAX_REG_OFFSET \
|
||||
(offsetof(struct pt_regs, __last) - sizeof(unsigned long))
|
||||
|
||||
/**
|
||||
* regs_get_register() - get register value from its offset
|
||||
|
|
|
@ -104,48 +104,59 @@ handle_vcei:
|
|||
|
||||
__FINIT
|
||||
|
||||
.align 5 /* 32 byte rollback region */
|
||||
LEAF(__r4k_wait)
|
||||
.set push
|
||||
.set noreorder
|
||||
/* start of rollback region */
|
||||
LONG_L t0, TI_FLAGS($28)
|
||||
nop
|
||||
andi t0, _TIF_NEED_RESCHED
|
||||
bnez t0, 1f
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
#ifdef CONFIG_CPU_MICROMIPS
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
#endif
|
||||
.section .cpuidle.text,"ax"
|
||||
/* Align to 32 bytes for the maximum idle interrupt region size. */
|
||||
.align 5
|
||||
LEAF(r4k_wait)
|
||||
/* Keep the ISA bit clear for calculations on local labels here. */
|
||||
0: .fill 0
|
||||
/* Start of idle interrupt region. */
|
||||
local_irq_enable
|
||||
/*
|
||||
* If an interrupt lands here, before going idle on the next
|
||||
* instruction, we must *NOT* go idle since the interrupt could
|
||||
* have set TIF_NEED_RESCHED or caused a timer to need resched.
|
||||
* Fall through -- see skipover_handler below -- and have the
|
||||
* idle loop take care of things.
|
||||
*/
|
||||
1: .fill 0
|
||||
/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
|
||||
.if 1b - 0b > 32
|
||||
.error "overlong idle interrupt region"
|
||||
.elseif 1b - 0b > 8
|
||||
.align 4
|
||||
.endif
|
||||
2: .fill 0
|
||||
.equ r4k_wait_idle_size, 2b - 0b
|
||||
/* End of idle interrupt region; size has to be a power of 2. */
|
||||
.set MIPS_ISA_ARCH_LEVEL_RAW
|
||||
r4k_wait_insn:
|
||||
wait
|
||||
/* end of rollback region (the region size must be power of two) */
|
||||
1:
|
||||
r4k_wait_exit:
|
||||
.set mips0
|
||||
local_irq_disable
|
||||
jr ra
|
||||
nop
|
||||
.set pop
|
||||
END(__r4k_wait)
|
||||
END(r4k_wait)
|
||||
.previous
|
||||
|
||||
.macro BUILD_ROLLBACK_PROLOGUE handler
|
||||
FEXPORT(rollback_\handler)
|
||||
.macro BUILD_SKIPOVER_PROLOGUE handler
|
||||
FEXPORT(skipover_\handler)
|
||||
.set push
|
||||
.set noat
|
||||
MFC0 k0, CP0_EPC
|
||||
PTR_LA k1, __r4k_wait
|
||||
ori k0, 0x1f /* 32 byte rollback region */
|
||||
xori k0, 0x1f
|
||||
/* Subtract/add 2 to let the ISA bit propagate through the mask. */
|
||||
PTR_LA k1, r4k_wait_insn - 2
|
||||
ori k0, r4k_wait_idle_size - 2
|
||||
.set noreorder
|
||||
bne k0, k1, \handler
|
||||
PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2
|
||||
.set reorder
|
||||
MTC0 k0, CP0_EPC
|
||||
.set pop
|
||||
.endm
|
||||
|
||||
.align 5
|
||||
BUILD_ROLLBACK_PROLOGUE handle_int
|
||||
BUILD_SKIPOVER_PROLOGUE handle_int
|
||||
NESTED(handle_int, PT_SIZE, sp)
|
||||
.cfi_signal_frame
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
|
@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
|
|||
* This prototype is copied to ebase + n*IntCtl.VS and patched
|
||||
* to invoke the handler
|
||||
*/
|
||||
BUILD_ROLLBACK_PROLOGUE except_vec_vi
|
||||
BUILD_SKIPOVER_PROLOGUE except_vec_vi
|
||||
NESTED(except_vec_vi, 0, sp)
|
||||
SAVE_SOME docfi=1
|
||||
SAVE_AT docfi=1
|
||||
|
|
|
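The skipover prologue above works by rounding the trapping EPC down to the start of the power-of-2 idle interrupt region and, when it matches, moving EPC past the wait instruction so the idle loop re-evaluates need_resched. A minimal userspace model of that fixup arithmetic, a sketch only (hypothetical names and addresses, and the MIPS ISA-bit adjustment from the real code omitted):

#include <stdint.h>
#include <stdio.h>

#define IDLE_REGION_SIZE 32u	/* power of 2, as the .equ above asserts */

/* Round EPC down to the region base; if it idles there, resume past it. */
static uintptr_t skipover_fixup(uintptr_t epc, uintptr_t region_start,
				uintptr_t region_end)
{
	uintptr_t base = epc & ~(uintptr_t)(IDLE_REGION_SIZE - 1);

	if (base != region_start)
		return epc;		/* interrupt did not hit the idle region */
	return region_end;		/* skip over the wait instruction */
}

int main(void)
{
	uintptr_t start = 0x80001020, end = 0x80001040;

	/* An interrupt at 0x8000102c lands inside the region: EPC moves to end. */
	printf("%#lx\n", (unsigned long)skipover_fixup(0x8000102c, start, end));
	return 0;
}
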
@@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void)
 	write_c0_conf(cfg | R30XX_CONF_HALT);
 }
 
-void __cpuidle r4k_wait(void)
-{
-	raw_local_irq_enable();
-	__r4k_wait();
-	raw_local_irq_disable();
-}
-
 /*
  * This variant is preferable as it allows testing need_resched and going to
  * sleep depending on the outcome atomically. Unfortunately the "It is

@@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 	mips_cps_cluster_bootcfg = kcalloc(nclusters,
 					   sizeof(*mips_cps_cluster_bootcfg),
 					   GFP_KERNEL);
+	if (!mips_cps_cluster_bootcfg)
+		goto err_out;
 
 	if (nclusters > 1)
 		mips_cm_update_property();
@@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 		mips_cps_cluster_bootcfg[cl].core_power =
 			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
 				GFP_KERNEL);
+		if (!mips_cps_cluster_bootcfg[cl].core_power)
+			goto err_out;
 
 		/* Allocate VPE boot configuration structs */
 		for (c = 0; c < ncores; c++) {

@@ -77,7 +77,7 @@
 #include "access-helper.h"
 
 extern void check_wait(void);
-extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void skipover_handle_int(void);
 extern asmlinkage void handle_int(void);
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
@@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 {
 	extern const u8 except_vec_vi[];
 	extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
-	extern const u8 rollback_except_vec_vi[];
+	extern const u8 skipover_except_vec_vi[];
 	unsigned long handler;
 	unsigned long old_handler = vi_handlers[n];
 	int srssets = current_cpu_data.srsets;
@@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr)
 		change_c0_srsmap(0xf << n*4, 0 << n*4);
 	}
 
-	vec_start = using_rollback_handler() ? rollback_except_vec_vi :
+	vec_start = using_skipover_handler() ? skipover_except_vec_vi :
 					       except_vec_vi;
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
 	ori_offset = except_vec_vi_ori - vec_start + 2;
@@ -2426,8 +2426,8 @@ void __init trap_init(void)
 	if (board_be_init)
 		board_be_init();
 
-	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
-					rollback_handle_int : handle_int);
+	set_except_vector(EXCCODE_INT, using_skipover_handler() ?
+					skipover_handle_int : handle_int);
 	set_except_vector(EXCCODE_MOD, handle_tlbm);
 	set_except_vector(EXCCODE_TLBL, handle_tlbl);
 	set_except_vector(EXCCODE_TLBS, handle_tlbs);

@@ -341,7 +341,7 @@
 				       1024 1024 1024 1024>;
 			snps,priority = <0 1 2 3 4 5 6 7>;
 			snps,dma-masters = <2>;
-			snps,data-width = <4>;
+			snps,data-width = <2>;
 			status = "disabled";
 		};
 

@@ -275,6 +275,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
 	unsigned long pmm;
 	u8 pmlen;
 
+	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+		return -EINVAL;
+
 	if (is_compat_thread(ti))
 		return -EINVAL;
 
@@ -330,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task)
 	struct thread_info *ti = task_thread_info(task);
 	long ret = 0;
 
+	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
+		return -EINVAL;
+
 	if (is_compat_thread(ti))
 		return -EINVAL;
 

@@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
 DO_ERROR_INFO(do_trap_load_fault,
 	SIGSEGV, SEGV_ACCERR, "load access fault");
 
-asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+enum misaligned_access_type {
+	MISALIGNED_STORE,
+	MISALIGNED_LOAD,
+};
+static const struct {
+	const char *type_str;
+	int (*handler)(struct pt_regs *regs);
+} misaligned_handler[] = {
+	[MISALIGNED_STORE] = {
+		.type_str = "Oops - store (or AMO) address misaligned",
+		.handler = handle_misaligned_store,
+	},
+	[MISALIGNED_LOAD] = {
+		.type_str = "Oops - load address misaligned",
+		.handler = handle_misaligned_load,
+	},
+};
+
+static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type)
 {
+	irqentry_state_t state;
+
 	if (user_mode(regs)) {
 		irqentry_enter_from_user_mode(regs);
+		local_irq_enable();
+	} else {
+		state = irqentry_nmi_enter(regs);
+	}
 
-		if (handle_misaligned_load(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				      "Oops - load address misaligned");
+	if (misaligned_handler[type].handler(regs))
+		do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+			      misaligned_handler[type].type_str);
 
+	if (user_mode(regs)) {
+		local_irq_disable();
 		irqentry_exit_to_user_mode(regs);
 	} else {
-		irqentry_state_t state = irqentry_nmi_enter(regs);
-
-		if (handle_misaligned_load(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				      "Oops - load address misaligned");
-
 		irqentry_nmi_exit(regs, state);
 	}
 }
 
+asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
+{
+	do_trap_misaligned(regs, MISALIGNED_LOAD);
+}
+
 asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
 {
-	if (user_mode(regs)) {
-		irqentry_enter_from_user_mode(regs);
-
-		if (handle_misaligned_store(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				      "Oops - store (or AMO) address misaligned");
-
-		irqentry_exit_to_user_mode(regs);
-	} else {
-		irqentry_state_t state = irqentry_nmi_enter(regs);
-
-		if (handle_misaligned_store(regs))
-			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
-				      "Oops - store (or AMO) address misaligned");
-
-		irqentry_nmi_exit(regs, state);
-	}
+	do_trap_misaligned(regs, MISALIGNED_STORE);
 }
 
 DO_ERROR_INFO(do_trap_store_fault,
 	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
 DO_ERROR_INFO(do_trap_ecall_s,

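The rework above folds two nearly identical trap handlers into a single helper indexed by access type, so the entry/exit bracketing exists in exactly one place. A self-contained sketch of the same table-driven dispatch pattern, with made-up stand-in types rather than the kernel's pt_regs and irqentry API:

#include <stdio.h>

struct fake_regs { unsigned long epc; };

static int handle_load(struct fake_regs *r)  { (void)r; return 0; /* emulated */ }
static int handle_store(struct fake_regs *r) { (void)r; return 1; /* failed */ }

enum access_type { ACC_STORE, ACC_LOAD };

static const struct {
	const char *msg;
	int (*handler)(struct fake_regs *);
} dispatch[] = {
	[ACC_STORE] = { "store (or AMO) address misaligned", handle_store },
	[ACC_LOAD]  = { "load address misaligned",           handle_load  },
};

/* One body replaces two: only the table index differs per trap. */
static void do_misaligned(struct fake_regs *regs, enum access_type t)
{
	if (dispatch[t].handler(regs))
		fprintf(stderr, "Oops - %s at %#lx\n", dispatch[t].msg, regs->epc);
}

int main(void)
{
	struct fake_regs r = { .epc = 0x1005 };

	do_misaligned(&r, ACC_LOAD);	/* emulation succeeds, stays quiet */
	do_misaligned(&r, ACC_STORE);	/* falls through to the error path */
	return 0;
}
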
@@ -88,6 +88,13 @@
 #define INSN_MATCH_C_FSWSP	0xe002
 #define INSN_MASK_C_FSWSP	0xe003
 
+#define INSN_MATCH_C_LHU	0x8400
+#define INSN_MASK_C_LHU		0xfc43
+#define INSN_MATCH_C_LH		0x8440
+#define INSN_MASK_C_LH		0xfc43
+#define INSN_MATCH_C_SH		0x8c00
+#define INSN_MASK_C_SH		0xfc43
+
 #define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
 
 #if defined(CONFIG_64BIT)
@@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
 	int __ret;							\
 									\
 	if (user_mode(regs)) {						\
-		__ret = __get_user(insn, (type __user *) insn_addr);	\
+		__ret = get_user(insn, (type __user *) insn_addr);	\
 	} else {							\
 		insn = *(type *)insn_addr;				\
 		__ret = 0;						\
@@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
 		fp = 1;
 		len = 4;
 #endif
+	} else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) {
+		len = 2;
+		insn = RVC_RS2S(insn) << SH_RD;
+	} else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) {
+		len = 2;
+		shift = 8 * (sizeof(ulong) - len);
+		insn = RVC_RS2S(insn) << SH_RD;
 	} else {
 		regs->epc = epc;
 		return -1;
@@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
 		len = 4;
 		val.data_ulong = GET_F32_RS2C(insn, regs);
 #endif
+	} else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) {
+		len = 2;
+		val.data_ulong = GET_RS2S(insn, regs);
 	} else {
 		regs->epc = epc;
 		return -1;

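For C.LH the handler sets shift = 8 * (sizeof(ulong) - len) so that a later left/right shift pair sign-extends the 16-bit value across the native word (C.LHU leaves shift at zero and stays zero-extended). A small demonstration of that idiom, with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned long raw = 0x8001;		/* -32767 as a 16-bit value */
	int len = 2;				/* bytes loaded */
	int shift = 8 * (int)(sizeof(unsigned long) - len);

	/* Arithmetic right shift replicates the sign bit back down. */
	long v = (long)(raw << shift) >> shift;

	printf("%ld\n", v);			/* prints -32767 */
	return 0;
}
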
@@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
 	spin_unlock(&vcpu->arch.reset_cntx_lock);
 
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
 	kvm_riscv_vcpu_vector_reset(vcpu);

@@ -154,5 +154,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated
 archclean:
 	@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
 		-o -name '*.gcov' \) -type f -print | xargs rm -f
+	$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean
 
 export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH

@@ -2368,6 +2368,7 @@ config STRICT_SIGALTSTACK_SIZE
 config CFI_AUTO_DEFAULT
 	bool "Attempt to use FineIBT by default at boot time"
 	depends on FINEIBT
+	depends on !RUST || RUSTC_VERSION >= 108800
 	default y
 	help
 	  Attempt to use FineIBT by default at boot time. If enabled,
@@ -2710,6 +2711,18 @@ config MITIGATION_SSB
 	  of speculative execution in a similar way to the Meltdown and Spectre
 	  security vulnerabilities.
 
+config MITIGATION_ITS
+	bool "Enable Indirect Target Selection mitigation"
+	depends on CPU_SUP_INTEL && X86_64
+	depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
+	select EXECMEM
+	default y
+	help
+	  Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
+	  BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
+	  disabled, mitigation cannot be enabled via cmdline.
+	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
+
 endif
 
 config ARCH_HAS_ADD_PAGES

@@ -1525,7 +1525,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
  * ORC to unwind properly.
  *
  * The alignment is for performance and not for safety, and may be safely
- * refactored in the future if needed.
+ * refactored in the future if needed. The .skips are for safety, to ensure
+ * that all RETs are in the second half of a cacheline to mitigate Indirect
+ * Target Selection, rather than taking the slowpath via its_return_thunk.
  */
 SYM_FUNC_START(clear_bhb_loop)
 	ANNOTATE_NOENDBR
@@ -1536,10 +1538,22 @@ SYM_FUNC_START(clear_bhb_loop)
 	call	1f
 	jmp	5f
 	.align 64, 0xcc
+	/*
+	 * Shift instructions so that the RET is in the upper half of the
+	 * cacheline and don't take the slowpath to its_return_thunk.
+	 */
+	.skip 32 - (.Lret1 - 1f), 0xcc
 	ANNOTATE_INTRA_FUNCTION_CALL
 1:	call	2f
-	RET
+.Lret1:	RET
 	.align 64, 0xcc
+	/*
+	 * As above shift instructions for RET at .Lret2 as well.
+	 *
+	 * This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
+	 * but some Clang versions (e.g. 18) don't like this.
+	 */
+	.skip 32 - 18, 0xcc
 2:	movl	$5, %eax
 3:	jmp	4f
 	nop
@@ -1547,7 +1561,7 @@ SYM_FUNC_START(clear_bhb_loop)
 	jnz	3b
 	sub	$1, %ecx
 	jnz	1b
-	RET
+.Lret2:	RET
 5:	lfence
 	pop	%rbp
 	RET

@@ -6,6 +6,7 @@
 #include <linux/stringify.h>
+#include <linux/objtool.h>
 #include <asm/asm.h>
 #include <asm/bug.h>
 
 #define ALT_FLAGS_SHIFT		16
 
@@ -124,6 +125,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
 }
 #endif
 
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_init_mod(struct module *mod);
+extern void its_fini_mod(struct module *mod);
+extern void its_free_mod(struct module *mod);
+extern u8 *its_static_thunk(int reg);
+#else /* CONFIG_MITIGATION_ITS */
+static inline void its_init_mod(struct module *mod) { }
+static inline void its_fini_mod(struct module *mod) { }
+static inline void its_free_mod(struct module *mod) { }
+static inline u8 *its_static_thunk(int reg)
+{
+	WARN_ONCE(1, "ITS not compiled in");
+
+	return NULL;
+}
+#endif
+
+#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+	return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,

@@ -481,6 +481,7 @@
 #define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
 #define X86_FEATURE_AMD_WORKLOAD_CLASS	(21*32 + 7) /* Workload Classification */
 #define X86_FEATURE_PREFER_YMM		(21*32 + 8) /* Avoid ZMM registers due to downclocking */
+#define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
 
 /*
  * BUG word(s)
@@ -533,4 +534,6 @@
 #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
 #define X86_BUG_IBPB_NO_RET		X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #define X86_BUG_SPECTRE_V2_USER		X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
+#define X86_BUG_ITS			X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
+#define X86_BUG_ITS_NATIVE_ONLY		X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
 #endif /* _ASM_X86_CPUFEATURES_H */

@@ -17,10 +17,12 @@ struct ucode_cpu_info {
 void load_ucode_bsp(void);
 void load_ucode_ap(void);
 void microcode_bsp_resume(void);
+bool __init microcode_loader_disabled(void);
 #else
 static inline void load_ucode_bsp(void) { }
 static inline void load_ucode_ap(void) { }
 static inline void microcode_bsp_resume(void) { }
+static inline bool __init microcode_loader_disabled(void) { return false; }
 #endif
 
 extern unsigned long initrd_start_early;

@@ -211,6 +211,14 @@
 						 * VERW clears CPU Register
 						 * File.
 						 */
+#define ARCH_CAP_ITS_NO			BIT_ULL(62) /*
+						 * Not susceptible to
+						 * Indirect Target Selection.
+						 * This bit is not set by
+						 * HW, but is synthesized by
+						 * VMMs for guests to know
+						 * their affected status.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*

@@ -336,10 +336,14 @@
 
 #else /* __ASSEMBLER__ */
 
+#define ITS_THUNK_SIZE	64
+
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+typedef u8 its_thunk_t[ITS_THUNK_SIZE];
 extern retpoline_thunk_t __x86_indirect_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+extern its_thunk_t __x86_indirect_its_thunk_array[];
 
 #ifdef CONFIG_MITIGATION_RETHUNK
 extern void __x86_return_thunk(void);
@@ -363,6 +367,12 @@ static inline void srso_return_thunk(void) {}
 static inline void srso_alias_return_thunk(void) {}
 #endif
 
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);

@@ -18,6 +18,7 @@
 #include <linux/mmu_context.h>
 #include <linux/bsearch.h>
 #include <linux/sync_core.h>
+#include <linux/execmem.h>
 #include <asm/text-patching.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
@@ -31,6 +32,8 @@
 #include <asm/paravirt.h>
 #include <asm/asm-prototypes.h>
 #include <asm/cfi.h>
+#include <asm/ibt.h>
+#include <asm/set_memory.h>
 
 int __read_mostly alternatives_patched;
 
@@ -124,6 +127,171 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
 #endif
 };
 
+#ifdef CONFIG_FINEIBT
+static bool cfi_paranoid __ro_after_init;
+#endif
+
+#ifdef CONFIG_MITIGATION_ITS
+
+#ifdef CONFIG_MODULES
+static struct module *its_mod;
+#endif
+static void *its_page;
+static unsigned int its_offset;
+
+/* Initialize a thunk with the "jmp *reg; int3" instructions. */
+static void *its_init_thunk(void *thunk, int reg)
+{
+	u8 *bytes = thunk;
+	int offset = 0;
+	int i = 0;
+
+#ifdef CONFIG_FINEIBT
+	if (cfi_paranoid) {
+		/*
+		 * When ITS uses indirect branch thunk the fineibt_paranoid
+		 * caller sequence doesn't fit in the caller site. So put the
+		 * remaining part of the sequence (<ea> + JNE) into the ITS
+		 * thunk.
+		 */
+		bytes[i++] = 0xea; /* invalid instruction */
+		bytes[i++] = 0x75; /* JNE */
+		bytes[i++] = 0xfd;
+
+		offset = 1;
+	}
+#endif
+
+	if (reg >= 8) {
+		bytes[i++] = 0x41; /* REX.B prefix */
+		reg -= 8;
+	}
+	bytes[i++] = 0xff;
+	bytes[i++] = 0xe0 + reg; /* jmp *reg */
+	bytes[i++] = 0xcc;
+
+	return thunk + offset;
+}
+
+#ifdef CONFIG_MODULES
+void its_init_mod(struct module *mod)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+		return;
+
+	mutex_lock(&text_mutex);
+	its_mod = mod;
+	its_page = NULL;
+}
+
+void its_fini_mod(struct module *mod)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+		return;
+
+	WARN_ON_ONCE(its_mod != mod);
+
+	its_mod = NULL;
+	its_page = NULL;
+	mutex_unlock(&text_mutex);
+
+	for (int i = 0; i < mod->its_num_pages; i++) {
+		void *page = mod->its_page_array[i];
+		execmem_restore_rox(page, PAGE_SIZE);
+	}
+}
+
+void its_free_mod(struct module *mod)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+		return;
+
+	for (int i = 0; i < mod->its_num_pages; i++) {
+		void *page = mod->its_page_array[i];
+		execmem_free(page);
+	}
+	kfree(mod->its_page_array);
+}
+#endif /* CONFIG_MODULES */
+
+static void *its_alloc(void)
+{
+	void *page __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);
+
+	if (!page)
+		return NULL;
+
+#ifdef CONFIG_MODULES
+	if (its_mod) {
+		void *tmp = krealloc(its_mod->its_page_array,
+				     (its_mod->its_num_pages+1) * sizeof(void *),
+				     GFP_KERNEL);
+		if (!tmp)
+			return NULL;
+
+		its_mod->its_page_array = tmp;
+		its_mod->its_page_array[its_mod->its_num_pages++] = page;
+
+		execmem_make_temp_rw(page, PAGE_SIZE);
+	}
+#endif /* CONFIG_MODULES */
+
+	return no_free_ptr(page);
+}
+
+static void *its_allocate_thunk(int reg)
+{
+	int size = 3 + (reg / 8);
+	void *thunk;
+
+#ifdef CONFIG_FINEIBT
+	/*
+	 * The ITS thunk contains an indirect jump and an int3 instruction so
+	 * its size is 3 or 4 bytes depending on the register used. If CFI
+	 * paranoid is used then 3 extra bytes are added in the ITS thunk to
+	 * complete the fineibt_paranoid caller sequence.
+	 */
+	if (cfi_paranoid)
+		size += 3;
+#endif
+
+	if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
+		its_page = its_alloc();
+		if (!its_page) {
+			pr_err("ITS page allocation failed\n");
+			return NULL;
+		}
+		memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
+		its_offset = 32;
+	}
+
+	/*
+	 * If the indirect branch instruction will be in the lower half
+	 * of a cacheline, then update the offset to reach the upper half.
+	 */
+	if ((its_offset + size - 1) % 64 < 32)
+		its_offset = ((its_offset - 1) | 0x3F) + 33;
+
+	thunk = its_page + its_offset;
+	its_offset += size;
+
+	return its_init_thunk(thunk, reg);
+}
+
+u8 *its_static_thunk(int reg)
+{
+	u8 *thunk = __x86_indirect_its_thunk_array[reg];
+
+#ifdef CONFIG_FINEIBT
+	/* Paranoid thunk starts 2 bytes before */
+	if (cfi_paranoid)
+		return thunk - 2;
+#endif
+	return thunk;
+}
+
+#endif
+
 /*
  * Nomenclature for variable names to simplify and clarify this code and ease
  * any potential staring at it:

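its_allocate_thunk() packs thunks into a page while guaranteeing that the indirect branch's last byte never sits in the lower 32 bytes of a 64-byte cacheline; when it would, the offset is bumped to the start of the next upper half. A userspace model of just that placement arithmetic, assuming 64-byte lines (a sketch, not kernel code):

#include <assert.h>
#include <stdio.h>

/* Return the (possibly bumped) offset for a thunk of 'size' bytes. */
static unsigned int place_thunk(unsigned int offset, unsigned int size)
{
	if ((offset + size - 1) % 64 < 32)
		offset = ((offset - 1) | 0x3F) + 33;	/* next upper half-line */
	return offset;
}

int main(void)
{
	/*
	 * A 4-byte thunk at offset 64 would end at byte 3 of its line
	 * (lower half), so it is pushed to offset 96, i.e. byte 32.
	 */
	unsigned int off = place_thunk(64, 4);

	assert(off == 96 && off % 64 == 32);
	printf("thunk placed at %u\n", off);
	return 0;
}

The same half-line rule appears twice more in this series: cpu_wants_indirect_its_thunk_at() tests !(addr & 0x20) to decide whether an existing branch needs a thunk, and the vmlinux.lds.S asserts later in this diff check that the static thunks landed in the upper half.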
@@ -581,7 +749,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
 	return i;
 }
 
-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
+			     void *call_dest, void *jmp_dest)
 {
 	u8 op = insn->opcode.bytes[0];
 	int i = 0;
@@ -602,7 +771,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
 	switch (op) {
 	case CALL_INSN_OPCODE:
 		__text_gen_insn(bytes+i, op, addr+i,
-				__x86_indirect_call_thunk_array[reg],
+				call_dest,
 				CALL_INSN_SIZE);
 		i += CALL_INSN_SIZE;
 		break;
@@ -610,7 +779,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
 	case JMP32_INSN_OPCODE:
 clang_jcc:
 		__text_gen_insn(bytes+i, op, addr+i,
-				__x86_indirect_jump_thunk_array[reg],
+				jmp_dest,
 				JMP32_INSN_SIZE);
 		i += JMP32_INSN_SIZE;
 		break;
@@ -625,6 +794,48 @@ clang_jcc:
 	return i;
 }
 
+static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+	return __emit_trampoline(addr, insn, bytes,
+				 __x86_indirect_call_thunk_array[reg],
+				 __x86_indirect_jump_thunk_array[reg]);
+}
+
+#ifdef CONFIG_MITIGATION_ITS
+static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+	u8 *thunk = __x86_indirect_its_thunk_array[reg];
+	u8 *tmp = its_allocate_thunk(reg);
+
+	if (tmp)
+		thunk = tmp;
+
+	return __emit_trampoline(addr, insn, bytes, thunk, thunk);
+}
+
+/* Check if an indirect branch is at ITS-unsafe address */
+static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+		return false;
+
+	/* Indirect branch opcode is 2 or 3 bytes depending on reg */
+	addr += 1 + reg / 8;
+
+	/* Lower-half of the cacheline? */
+	return !(addr & 0x20);
+}
+#else /* CONFIG_MITIGATION_ITS */
+
+#ifdef CONFIG_FINEIBT
+static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
+{
+	return false;
+}
+#endif
+
+#endif /* CONFIG_MITIGATION_ITS */
+
 /*
  * Rewrite the compiler generated retpoline thunk calls.
  *
@@ -699,6 +910,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 		bytes[i++] = 0xe8; /* LFENCE */
 	}
 
+#ifdef CONFIG_MITIGATION_ITS
+	/*
+	 * Check if the address of last byte of emitted-indirect is in
+	 * lower-half of the cacheline. Such branches need ITS mitigation.
+	 */
+	if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
+		return emit_its_trampoline(addr, insn, reg, bytes);
+#endif
+
 	ret = emit_indirect(op, reg, bytes + i);
 	if (ret < 0)
 		return ret;
@@ -732,6 +952,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 		int len, ret;
 		u8 bytes[16];
 		u8 op1, op2;
+		u8 *dest;
 
 		ret = insn_decode_kernel(&insn, addr);
 		if (WARN_ON_ONCE(ret < 0))
@@ -748,6 +969,12 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 		case CALL_INSN_OPCODE:
 		case JMP32_INSN_OPCODE:
+			/* Check for cfi_paranoid + ITS */
+			dest = addr + insn.length + insn.immediate.value;
+			if (dest[-1] == 0xea && (dest[0] & 0xf0) == 0x70) {
+				WARN_ON_ONCE(cfi_mode != CFI_FINEIBT);
+				continue;
+			}
 			break;
 
 		case 0x0f: /* escape */
@@ -775,6 +1002,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_MITIGATION_RETHUNK
 
+bool cpu_wants_rethunk(void)
+{
+	return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return false;
+	if (x86_return_thunk != its_return_thunk)
+		return true;
+
+	return !((unsigned long)addr & 0x20);
+}
+
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -791,7 +1033,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 	int i = 0;
 
 	/* Patch the custom return thunks... */
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+	if (cpu_wants_rethunk_at(addr)) {
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
@@ -808,7 +1050,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;
 
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+	if (cpu_wants_rethunk())
 		static_call_force_reinit();
 
 	for (s = start; s < end; s++) {
@@ -1022,8 +1264,6 @@ int cfi_get_func_arity(void *func)
 static bool cfi_rand __ro_after_init = true;
 static u32  cfi_seed __ro_after_init;
 
-static bool cfi_paranoid __ro_after_init = false;
-
 /*
  * Re-hash the CFI hash with a boot-time seed while making sure the result is
  * not a valid ENDBR instruction.
@@ -1436,6 +1676,19 @@ static int cfi_rand_callers(s32 *start, s32 *end)
 	return 0;
 }
 
+static int emit_paranoid_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+	u8 *thunk = (void *)__x86_indirect_its_thunk_array[reg] - 2;
+
+#ifdef CONFIG_MITIGATION_ITS
+	u8 *tmp = its_allocate_thunk(reg);
+	if (tmp)
+		thunk = tmp;
+#endif
+
+	return __emit_trampoline(addr, insn, bytes, thunk, thunk);
+}
+
 static int cfi_rewrite_callers(s32 *start, s32 *end)
 {
 	s32 *s;
@@ -1477,9 +1730,14 @@ static int cfi_rewrite_callers(s32 *start, s32 *end)
 		memcpy(bytes, fineibt_paranoid_start, fineibt_paranoid_size);
 		memcpy(bytes + fineibt_caller_hash, &hash, 4);
 
-		ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
-		if (WARN_ON_ONCE(ret != 3))
-			continue;
+		if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + fineibt_paranoid_ind, 11)) {
+			emit_paranoid_trampoline(addr + fineibt_caller_size,
+						 &insn, 11, bytes + fineibt_caller_size);
+		} else {
+			ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
+			if (WARN_ON_ONCE(ret != 3))
+				continue;
+		}
 
 		text_poke_early(addr, bytes, fineibt_paranoid_size);
 	}
@@ -1706,29 +1964,66 @@ Efault:
 	return false;
 }
 
+static bool is_paranoid_thunk(unsigned long addr)
+{
+	u32 thunk;
+
+	__get_kernel_nofault(&thunk, (u32 *)addr, u32, Efault);
+	return (thunk & 0x00FFFFFF) == 0xfd75ea;
+
+Efault:
+	return false;
+}
+
 /*
  * regs->ip points to a LOCK Jcc.d8 instruction from the fineibt_paranoid_start[]
- * sequence.
+ * sequence, or to an invalid instruction (0xea) + Jcc.d8 for cfi_paranoid + ITS
+ * thunk.
  */
 static bool decode_fineibt_paranoid(struct pt_regs *regs, unsigned long *target, u32 *type)
 {
 	unsigned long addr = regs->ip - fineibt_paranoid_ud;
 	u32 hash;
 
-	if (!cfi_paranoid || !is_cfi_trap(addr + fineibt_caller_size - LEN_UD2))
+	if (!cfi_paranoid)
 		return false;
 
 	__get_kernel_nofault(&hash, addr + fineibt_caller_hash, u32, Efault);
-	*target = regs->r11 + fineibt_preamble_size;
-	*type = regs->r10;
 
-	/*
-	 * Since the trapping instruction is the exact, but LOCK prefixed,
-	 * Jcc.d8 that got us here, the normal fixup will work.
-	 */
-	return true;
+	if (is_cfi_trap(addr + fineibt_caller_size - LEN_UD2)) {
+		*target = regs->r11 + fineibt_preamble_size;
+		*type = regs->r10;
+
+		/*
+		 * Since the trapping instruction is the exact, but LOCK prefixed,
+		 * Jcc.d8 that got us here, the normal fixup will work.
+		 */
+		return true;
+	}
+
+	/*
+	 * The cfi_paranoid + ITS thunk combination results in:
+	 *
+	 *  0:   41 ba 78 56 34 12       mov    $0x12345678, %r10d
+	 *  6:   45 3b 53 f7             cmp    -0x9(%r11), %r10d
+	 *  a:   4d 8d 5b f0             lea    -0x10(%r11), %r11
+	 *  e:   2e e8 XX XX XX XX       cs call __x86_indirect_paranoid_thunk_r11
+	 *
+	 * Where the paranoid_thunk looks like:
+	 *
+	 *  1d:  <ea>                    (bad)
+	 * __x86_indirect_paranoid_thunk_r11:
+	 *  1e:  75 fd                   jne 1d
+	 * __x86_indirect_its_thunk_r11:
+	 *  20:  41 ff eb                jmp *%r11
+	 *  23:  cc                      int3
+	 */
+	if (is_paranoid_thunk(regs->ip)) {
+		*target = regs->r11 + fineibt_preamble_size;
+		*type = regs->r10;
+
+		regs->ip = *target;
+		return true;
+	}
 
 Efault:
 	return false;
 }
@@ -2031,6 +2326,8 @@ static noinline void __init alt_reloc_selftest(void)
 
 void __init alternative_instructions(void)
 {
+	u64 ibt;
+
 	int3_selftest();
 
 	/*
@@ -2057,6 +2354,9 @@ void __init alternative_instructions(void)
 	 */
 	paravirt_set_cap();
 
+	/* Keep CET-IBT disabled until caller/callee are patched */
+	ibt = ibt_save(/*disable*/ true);
+
 	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
 			__cfi_sites, __cfi_sites_end, true);
 
@@ -2080,6 +2380,8 @@ void __init alternative_instructions(void)
 	 */
 	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
 
+	ibt_restore(ibt);
+
 #ifdef CONFIG_SMP
 	/* Patch to UP if other cpus not imminent. */
 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {

@@ -49,6 +49,7 @@ static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init gds_select_mitigation(void);
+static void __init its_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -66,6 +67,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
 
 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
 
+static void __init set_return_thunk(void *thunk)
+{
+	if (x86_return_thunk != __x86_return_thunk)
+		pr_warn("x86/bugs: return thunk changed\n");
+
+	x86_return_thunk = thunk;
+}
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
@@ -178,6 +187,7 @@ void __init cpu_select_mitigations(void)
 	 */
 	srso_select_mitigation();
 	gds_select_mitigation();
+	its_select_mitigation();
 }
 
 /*
@@ -1118,7 +1128,7 @@ do_cmd_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-		x86_return_thunk = retbleed_return_thunk;
+		set_return_thunk(retbleed_return_thunk);
 
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1153,7 +1163,7 @@ do_cmd_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
 
-		x86_return_thunk = call_depth_return_thunk;
+		set_return_thunk(call_depth_return_thunk);
 		break;
 
 	default:
@@ -1187,6 +1197,145 @@ do_cmd_auto:
 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"ITS: " fmt
+
+enum its_mitigation_cmd {
+	ITS_CMD_OFF,
+	ITS_CMD_ON,
+	ITS_CMD_VMEXIT,
+	ITS_CMD_RSB_STUFF,
+};
+
+enum its_mitigation {
+	ITS_MITIGATION_OFF,
+	ITS_MITIGATION_VMEXIT_ONLY,
+	ITS_MITIGATION_ALIGNED_THUNKS,
+	ITS_MITIGATION_RETPOLINE_STUFF,
+};
+
+static const char * const its_strings[] = {
+	[ITS_MITIGATION_OFF]			= "Vulnerable",
+	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
+	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
+	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
+};
+
+static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
+
+static enum its_mitigation_cmd its_cmd __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
+
+static int __init its_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
+		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
+		return 0;
+	}
+
+	if (!strcmp(str, "off")) {
+		its_cmd = ITS_CMD_OFF;
+	} else if (!strcmp(str, "on")) {
+		its_cmd = ITS_CMD_ON;
+	} else if (!strcmp(str, "force")) {
+		its_cmd = ITS_CMD_ON;
+		setup_force_cpu_bug(X86_BUG_ITS);
+	} else if (!strcmp(str, "vmexit")) {
+		its_cmd = ITS_CMD_VMEXIT;
+	} else if (!strcmp(str, "stuff")) {
+		its_cmd = ITS_CMD_RSB_STUFF;
+	} else {
+		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+	}
+
+	return 0;
+}
+early_param("indirect_target_selection", its_parse_cmdline);
+
+static void __init its_select_mitigation(void)
+{
+	enum its_mitigation_cmd cmd = its_cmd;
+
+	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+		its_mitigation = ITS_MITIGATION_OFF;
+		return;
+	}
+
+	/* Retpoline+CDT mitigates ITS, bail out */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+	    boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
+		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+		goto out;
+	}
+
+	/* Exit early to avoid irrelevant warnings */
+	if (cmd == ITS_CMD_OFF) {
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (spectre_v2_enabled == SPECTRE_V2_NONE) {
+		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
+	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
+		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
+		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+
+	if (cmd == ITS_CMD_RSB_STUFF &&
+	    (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
+		pr_err("RSB stuff mitigation not supported, using default\n");
+		cmd = ITS_CMD_ON;
+	}
+
+	switch (cmd) {
+	case ITS_CMD_OFF:
+		its_mitigation = ITS_MITIGATION_OFF;
+		break;
+	case ITS_CMD_VMEXIT:
+		if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
+			its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
+			goto out;
+		}
+		fallthrough;
+	case ITS_CMD_ON:
+		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		set_return_thunk(its_return_thunk);
+		break;
+	case ITS_CMD_RSB_STUFF:
+		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+		set_return_thunk(call_depth_return_thunk);
+		if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
+			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+			pr_info("Retbleed mitigation updated to stuffing\n");
+		}
+		break;
+	}
+out:
+	pr_info("%s\n", its_strings[its_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V2 : " fmt
 
@@ -1697,11 +1846,11 @@ static void __init bhi_select_mitigation(void)
 		return;
 	}
 
-	/* Mitigate in hardware if supported */
-	if (spec_ctrl_bhi_dis())
+	if (!IS_ENABLED(CONFIG_X86_64))
 		return;
 
-	if (!IS_ENABLED(CONFIG_X86_64))
+	/* Mitigate in hardware if supported */
+	if (spec_ctrl_bhi_dis())
 		return;
 
 	if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
@@ -2607,10 +2756,10 @@ static void __init srso_select_mitigation(void)
 
 			if (boot_cpu_data.x86 == 0x19) {
 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-				x86_return_thunk = srso_alias_return_thunk;
+				set_return_thunk(srso_alias_return_thunk);
 			} else {
 				setup_force_cpu_cap(X86_FEATURE_SRSO);
-				x86_return_thunk = srso_return_thunk;
+				set_return_thunk(srso_return_thunk);
 			}
 			if (has_microcode)
 				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
@@ -2800,6 +2949,11 @@ static ssize_t rfds_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
 }
 
+static ssize_t its_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
+}
+
 static char *stibp_state(void)
 {
 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2982,6 +3136,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 	case X86_BUG_RFDS:
 		return rfds_show_state(buf);
 
+	case X86_BUG_ITS:
+		return its_show_state(buf);
+
 	default:
 		break;
 	}
@@ -3061,6 +3218,11 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
 }
+
+ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+}
 #endif
 
 void __warn_thunk(void)

@@ -1227,6 +1227,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define GDS		BIT(6)
 /* CPU is affected by Register File Data Sampling */
 #define RFDS		BIT(7)
+/* CPU is affected by Indirect Target Selection */
+#define ITS		BIT(8)
+/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+#define ITS_NATIVE_ONLY	BIT(9)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS),
@@ -1238,22 +1242,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	X86_STEP_MAX,	SRBDS),
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	X86_STEP_MAX,	MMIO),
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	X86_STEP_MAX,	SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	0x5,		MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS),
 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	0xb,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	0xc,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
 	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	X86_STEP_MAX,	RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS),
-	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
 	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
 	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,	ATOM,		RFDS),
 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	X86_STEP_MAX,	RFDS),
 	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,	ATOM,		RFDS),
@@ -1318,6 +1325,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 	return cpu_matches(cpu_vuln_blacklist, RFDS);
 }
 
+static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
+{
+	/* The "immunity" bit trumps everything else: */
+	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
+		return false;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
+	/* None of the affected CPUs have BHI_CTRL */
+	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
+		return false;
+
+	/*
+	 * If a VMM did not expose ITS_NO, assume that a guest could
+	 * be running on a vulnerable hardware or may migrate to such
+	 * hardware.
+	 */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return true;
+
+	if (cpu_matches(cpu_vuln_blacklist, ITS))
+		return true;
+
+	return false;
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
@@ -1439,9 +1472,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
-	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
-	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+	/*
+	 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
+	 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
+	 * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
+	 */
+	if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
 		setup_force_cpu_bug(X86_BUG_BHI);
@@ -1449,6 +1485,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
 		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
 
+	if (vulnerable_to_its(x86_arch_cap_msr)) {
+		setup_force_cpu_bug(X86_BUG_ITS);
+		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
+			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+	}
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 

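The blacklist changes key the new ITS flags off CPU model plus a stepping bound, splitting formerly single entries (for instance the Kaby Lake rows at steppings 0xb/0xc). A toy first-match lookup in the same spirit; model numbers and flag bits here are made up, not the kernel's:

#include <stdio.h>

#define STEP_MAX 0xff

struct vuln_entry {
	unsigned int model, max_step, flags;
};

#define F_GDS (1u << 0)
#define F_ITS (1u << 1)

/* Scanned in order; first row whose model and stepping bound match wins. */
static const struct vuln_entry table[] = {
	{ 0x9e, 0xb,      F_GDS },		/* early steppings: no ITS */
	{ 0x9e, STEP_MAX, F_GDS | F_ITS },	/* later steppings: ITS too */
};

static unsigned int vuln_flags(unsigned int model, unsigned int step)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].model == model && step <= table[i].max_step)
			return table[i].flags;
	}
	return 0;
}

int main(void)
{
	printf("step 0x9 -> %#x\n", vuln_flags(0x9e, 0x9));	/* 0x1 */
	printf("step 0xc -> %#x\n", vuln_flags(0x9e, 0xc));	/* 0x3 */
	return 0;
}
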
@@ -1098,15 +1098,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
 
 static int __init save_microcode_in_initrd(void)
 {
-	unsigned int cpuid_1_eax = native_cpuid_eax(1);
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	struct cont_desc desc = { 0 };
+	unsigned int cpuid_1_eax;
 	enum ucode_state ret;
 	struct cpio_data cp;
 
-	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
 		return 0;
 
+	cpuid_1_eax = native_cpuid_eax(1);
+
 	if (!find_blobs_in_containers(&cp))
 		return -EINVAL;
 

@@ -41,8 +41,8 @@
 
 #include "internal.h"
 
-static struct microcode_ops *microcode_ops;
-bool dis_ucode_ldr = true;
+static struct microcode_ops *microcode_ops;
+static bool dis_ucode_ldr = false;
 
 bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
 module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
 	u32 lvl, dummy, i;
 	u32 *levels;
 
+	if (x86_cpuid_vendor() != X86_VENDOR_AMD)
+		return false;
+
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
 
 	levels = final_levels;
@@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
 	return false;
 }
 
-static bool __init check_loader_disabled_bsp(void)
+bool __init microcode_loader_disabled(void)
 {
-	static const char *__dis_opt_str = "dis_ucode_ldr";
-	const char *cmdline = boot_command_line;
-	const char *option = __dis_opt_str;
-
-	/*
-	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
-	 * completely accurate as xen pv guests don't see that CPUID bit set but
-	 * that's good enough as they don't land on the BSP path anyway.
-	 */
-	if (native_cpuid_ecx(1) & BIT(31))
+	if (dis_ucode_ldr)
 		return true;
 
-	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
-		if (amd_check_current_patch_level())
-			return true;
-	}
-
-	if (cmdline_find_option_bool(cmdline, option) <= 0)
-		dis_ucode_ldr = false;
+	/*
+	 * Disable when:
+	 *
+	 * 1) The CPU does not support CPUID.
+	 *
+	 * 2) Bit 31 in CPUID[1]:ECX is clear
+	 *    The bit is reserved for hypervisor use. This is still not
+	 *    completely accurate as XEN PV guests don't see that CPUID bit
+	 *    set, but that's good enough as they don't land on the BSP
+	 *    path anyway.
+	 *
+	 * 3) Certain AMD patch levels are not allowed to be
+	 *    overwritten.
+	 */
+	if (!have_cpuid_p() ||
+	    native_cpuid_ecx(1) & BIT(31) ||
+	    amd_check_current_patch_level())
+		dis_ucode_ldr = true;
 
 	return dis_ucode_ldr;
 }
@@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
 	unsigned int cpuid_1_eax;
 	bool intel = true;
 
-	if (!have_cpuid_p())
+	if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
+		dis_ucode_ldr = true;
+
+	if (microcode_loader_disabled())
 		return;
 
 	cpuid_1_eax = native_cpuid_eax(1);
@@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
 		return;
 	}
 
-	if (check_loader_disabled_bsp())
-		return;
-
 	if (intel)
 		load_ucode_intel_bsp(&early_data);
 	else
@@ -159,6 +164,11 @@ void load_ucode_ap(void)
 {
 	unsigned int cpuid_1_eax;
 
+	/*
+	 * Can't use microcode_loader_disabled() here - .init section
+	 * hell. It doesn't have to either - the BSP variant must've
+	 * parsed cmdline already anyway.
+	 */
 	if (dis_ucode_ldr)
 		return;
 
@@ -810,7 +820,7 @@ static int __init microcode_init(void)
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 	int error;
 
-	if (dis_ucode_ldr)
+	if (microcode_loader_disabled())
 		return -EINVAL;
 
 	if (c->x86_vendor == X86_VENDOR_INTEL)

@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
 	if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
 		return 0;
 
-	if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+	if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return 0;
 
 	uci.mc = get_microcode_blob(&uci, true);

@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
 	return x86_family(eax);
 }
 
-extern bool dis_ucode_ldr;
 extern bool force_minrev;
 
 #ifdef CONFIG_CPU_SUP_AMD

@@ -354,7 +354,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 		goto fail;
 
 	ip = trampoline + size;
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+	if (cpu_wants_rethunk_at(ip))
 		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
 	else
 		memcpy(ip, retq, sizeof(retq));

@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
 	*ptr = (unsigned long)ptep + PAGE_OFFSET;
 
 #ifdef CONFIG_MICROCODE_INITRD32
-	/* Running on a hypervisor? */
-	if (native_cpuid_ecx(1) & BIT(31))
-		return;
-
 	params = (struct boot_params *)__pa_nodebug(&boot_params);
 	if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
 		return;

@@ -266,6 +266,8 @@ int module_finalize(const Elf_Ehdr *hdr,
 			ibt_endbr = s;
 	}
 
+	its_init_mod(me);
+
 	if (retpolines || cfi) {
 		void *rseg = NULL, *cseg = NULL;
 		unsigned int rsize = 0, csize = 0;
@@ -286,6 +288,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 		void *rseg = (void *)retpolines->sh_addr;
 		apply_retpolines(rseg, rseg + retpolines->sh_size);
 	}
+
+	its_fini_mod(me);
+
 	if (returns) {
 		void *rseg = (void *)returns->sh_addr;
 		apply_returns(rseg, rseg + returns->sh_size);
@@ -326,4 +331,5 @@ int module_finalize(const Elf_Ehdr *hdr,
 void module_arch_cleanup(struct module *mod)
 {
 	alternatives_smp_module_del(mod);
+	its_free_mod(mod);
 }

@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
 		break;
 
 	case RET:
-		if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		if (cpu_wants_rethunk_at(insn))
 			code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
 		else
 			code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
 	case JCC:
 		if (!func) {
 			func = __static_call_return;
-			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+			if (cpu_wants_rethunk())
 				func = x86_return_thunk;
 		}
 

@@ -466,10 +466,18 @@
 	}
 
+	/*
+	 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
+	 * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause
+	 * this. Let's assume that nobody will be running a COMPILE_TEST kernel and
+	 * let's assert that fuller build coverage is more valuable than being able to
+	 * run a COMPILE_TEST kernel.
+	 */
+#ifndef CONFIG_COMPILE_TEST
 	/*
 	 * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility:
 	 */
 	. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 		   "kernel image bigger than KERNEL_IMAGE_SIZE");
+#endif
 
 	/* needed for Clang - see arch/x86/entry/entry.S */
 	PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
@@ -497,6 +505,16 @@ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
 	   "SRSO function pair won't alias");
 #endif
 
+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
+. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
+. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
+#endif
+
+#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
+. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
+#endif
+
 #endif /* CONFIG_X86_64 */
 
 /*

@@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
+	if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
+		kvm_mmu_free_obsolete_roots(vcpu);
+
 	/*
 	 * Checking root.hpa is sufficient even when KVM has mirror root.
 	 * We can have either:

@@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
 	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    int *bytes)

@@ -7669,32 +7670,6 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 }
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
-bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
-					struct kvm_gfn_range *range)
-{
-	/*
-	 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
-	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
-	 * can simply ignore such slots. But if userspace is making memory
-	 * PRIVATE, then KVM must prevent the guest from accessing the memory
-	 * as shared. And if userspace is making memory SHARED and this point
-	 * is reached, then at least one page within the range was previously
-	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
-	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
-	 * a hugepage can be used for affected ranges.
-	 */
-	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
-		return false;
-
-	/* Unmap the old attribute page. */
-	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
-		range->attr_filter = KVM_FILTER_SHARED;
-	else
-		range->attr_filter = KVM_FILTER_PRIVATE;
-
-	return kvm_unmap_gfn_range(kvm, range);
-}
-
 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
 				int level)
 {

@@ -7713,6 +7688,69 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
 	lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
 }
 
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+					struct kvm_gfn_range *range)
+{
+	struct kvm_memory_slot *slot = range->slot;
+	int level;
+
+	/*
+	 * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only
+	 * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM
+	 * can simply ignore such slots. But if userspace is making memory
+	 * PRIVATE, then KVM must prevent the guest from accessing the memory
+	 * as shared. And if userspace is making memory SHARED and this point
+	 * is reached, then at least one page within the range was previously
+	 * PRIVATE, i.e. the slot's possible hugepage ranges are changing.
+	 * Zapping SPTEs in this case ensures KVM will reassess whether or not
+	 * a hugepage can be used for affected ranges.
+	 */
+	if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+		return false;
+
+	if (WARN_ON_ONCE(range->end <= range->start))
+		return false;
+
+	/*
+	 * If the head and tail pages of the range currently allow a hugepage,
+	 * i.e. reside fully in the slot and don't have mixed attributes, then
+	 * add each corresponding hugepage range to the ongoing invalidation,
+	 * e.g. to prevent KVM from creating a hugepage in response to a fault
+	 * for a gfn whose attributes aren't changing. Note, only the range
+	 * of gfns whose attributes are being modified needs to be explicitly
+	 * unmapped, as that will unmap any existing hugepages.
+	 */
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		gfn_t start = gfn_round_for_level(range->start, level);
+		gfn_t end = gfn_round_for_level(range->end - 1, level);
+		gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level);
+
+		if ((start != range->start || start + nr_pages > range->end) &&
+		    start >= slot->base_gfn &&
+		    start + nr_pages <= slot->base_gfn + slot->npages &&
+		    !hugepage_test_mixed(slot, start, level))
+			kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages);
+
+		if (end == start)
+			continue;
+
+		if ((end + nr_pages) > range->end &&
+		    (end + nr_pages) <= (slot->base_gfn + slot->npages) &&
+		    !hugepage_test_mixed(slot, end, level))
+			kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages);
+	}
+
+	/* Unmap the old attribute page. */
+	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
+		range->attr_filter = KVM_FILTER_SHARED;
+	else
+		range->attr_filter = KVM_FILTER_PRIVATE;
+
+	return kvm_unmap_gfn_range(kvm, range);
+}
+
 static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 			       gfn_t gfn, int level, unsigned long attrs)
 {

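For readers following the head/tail logic above, here is a stand-alone C sketch of the rounding and containment checks kvm_arch_pre_set_memory_attributes() performs. It is illustrative only: gfn_round_2m() is a hypothetical stand-in for gfn_round_for_level() at the 2M level, and the range/slot values are made-up examples, not kernel code.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;

#define PAGES_PER_2M (1ULL << 9)	/* 512 4K pages per 2M hugepage */

/* Round a gfn down to a 2M hugepage boundary. */
static gfn_t gfn_round_2m(gfn_t gfn)
{
	return gfn & ~(PAGES_PER_2M - 1);
}

int main(void)
{
	gfn_t range_start = 0x10234, range_end = 0x10800;	/* example range */
	gfn_t slot_base = 0x10000, slot_npages = 0x1000;	/* example slot */

	gfn_t head = gfn_round_2m(range_start);
	gfn_t tail = gfn_round_2m(range_end - 1);

	/* The head hugepage matters only if the range doesn't fully cover it. */
	if ((head != range_start || head + PAGES_PER_2M > range_end) &&
	    head >= slot_base && head + PAGES_PER_2M <= slot_base + slot_npages)
		printf("head hugepage [%llx, %llx) joins the invalidation\n",
		       (unsigned long long)head,
		       (unsigned long long)(head + PAGES_PER_2M));

	/* Same idea for a tail hugepage that extends past the range. */
	if (tail != head && tail + PAGES_PER_2M > range_end &&
	    tail + PAGES_PER_2M <= slot_base + slot_npages)
		printf("tail hugepage [%llx, %llx) joins the invalidation\n",
		       (unsigned long long)tail,
		       (unsigned long long)(tail + PAGES_PER_2M));
	return 0;
}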
@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 
 	kvm_mmu_reset_context(vcpu);
 }
+EXPORT_SYMBOL_GPL(kvm_smm_changed);
 
 void process_smi(struct kvm_vcpu *vcpu)
 {

@@ -3173,9 +3173,14 @@ skip_vmsa_free:
 	kvfree(svm->sev_es.ghcb_sa);
 }
 
+static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+{
+	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
+}
+
 static void dump_ghcb(struct vcpu_svm *svm)
 {
-	struct ghcb *ghcb = svm->sev_es.ghcb;
 	struct vmcb_control_area *control = &svm->vmcb->control;
 	unsigned int nbits;
 
 	/* Re-use the dump_invalid_vmcb module parameter */

@@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm)
 		return;
 	}
 
-	nbits = sizeof(ghcb->save.valid_bitmap) * 8;
+	nbits = sizeof(svm->sev_es.valid_bitmap) * 8;
 
-	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
+	/*
+	 * Print KVM's snapshot of the GHCB values that were (unsuccessfully)
+	 * used to handle the exit. If the guest has since modified the GHCB
+	 * itself, dumping the raw GHCB won't help debug why KVM was unable to
+	 * handle the VMGEXIT that KVM observed.
+	 */
+	pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
-	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
+	       kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
-	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
+	       control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
-	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
+	       control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm));
 	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
-	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
-	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
+	       svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm));
+	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap);
 }
 
 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)

@@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
 	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
-static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
-{
-	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
-}
-
 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;

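The helper moved above just reassembles a 64-bit exit code from the two 32-bit VMCB fields. A stand-alone equivalent, with an example value, for illustration:

#include <stdint.h>
#include <stdio.h>

/* Combine high and low 32-bit halves into one 64-bit exit code. */
static uint64_t get_sw_exit_code(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* example halves only */
	printf("0x%016llx\n",
	       (unsigned long long)get_sw_exit_code(0x0, 0x8000001d));
	return 0;
}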
@@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void)
 	kvm_cpu_svm_disable();
 
 	amd_pmu_disable_virt();
-
-	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-		msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
 }
 
 static int svm_enable_virtualization_cpu(void)

@@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void)
 		rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-
 	return 0;
 }

@@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+#ifdef CONFIG_CPU_MITIGATIONS
+static DEFINE_SPINLOCK(srso_lock);
+static atomic_t srso_nr_vms;
+
+static void svm_srso_clear_bp_spec_reduce(void *ign)
+{
+	struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
+
+	if (!sd->bp_spec_reduce_set)
+		return;
+
+	msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+	sd->bp_spec_reduce_set = false;
+}
+
+static void svm_srso_vm_destroy(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+		return;
+
+	if (atomic_dec_return(&srso_nr_vms))
+		return;
+
+	guard(spinlock)(&srso_lock);
+
+	/*
+	 * Verify a new VM didn't come along, acquire the lock, and increment
+	 * the count before this task acquired the lock.
+	 */
+	if (atomic_read(&srso_nr_vms))
+		return;
+
+	on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
+}
+
+static void svm_srso_vm_init(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+		return;
+
+	/*
+	 * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
+	 * transition, i.e. destroying the last VM, is fully complete, e.g. so
+	 * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
+	 */
+	if (atomic_inc_not_zero(&srso_nr_vms))
+		return;
+
+	guard(spinlock)(&srso_lock);
+
+	atomic_inc(&srso_nr_vms);
+}
+#else
+static void svm_srso_vm_init(void) { }
+static void svm_srso_vm_destroy(void) { }
+#endif
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

@@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
 		kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
 
+	if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
+	    !sd->bp_spec_reduce_set) {
+		sd->bp_spec_reduce_set = true;
+		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+	}
 	svm->guest_state_loaded = true;
 }

@@ -2231,6 +2287,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
 	 */
 	if (!sev_es_guest(vcpu->kvm)) {
 		clear_page(svm->vmcb);
+#ifdef CONFIG_KVM_SMM
+		if (is_smm(vcpu))
+			kvm_smm_changed(vcpu, false);
+#endif
 		kvm_vcpu_reset(vcpu, true);
 	}

@@ -5036,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm)
 {
 	avic_vm_destroy(kvm);
 	sev_vm_destroy(kvm);
+
+	svm_srso_vm_destroy();
 }
 
 static int svm_vm_init(struct kvm *kvm)

@@ -5061,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm)
 			return ret;
 	}
 
+	svm_srso_vm_init();
 	return 0;
 }

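The srso_nr_vms scheme above takes the lock only on 0 => 1 and final 1 => 0 transitions, so the common path stays a single atomic operation. A user-space pthread sketch of that pattern, illustrative only and not kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_vms;

static void vm_init(void)
{
	/* Fast path: like atomic_inc_not_zero(), bump only if already non-zero. */
	int old = atomic_load(&nr_vms);
	while (old && !atomic_compare_exchange_weak(&nr_vms, &old, old + 1))
		;
	if (old)
		return;

	/* 0 => 1: serialize against a concurrent final teardown. */
	pthread_mutex_lock(&lock);
	atomic_fetch_add(&nr_vms, 1);
	/* ...enable the mitigation on all CPUs here... */
	pthread_mutex_unlock(&lock);
}

static void vm_destroy(void)
{
	/* Like atomic_dec_return(): only the last VM falls through. */
	if (atomic_fetch_sub(&nr_vms, 1) - 1)
		return;

	pthread_mutex_lock(&lock);
	/* Recheck: a new VM may have raced in and re-raised the count. */
	if (!atomic_load(&nr_vms))
		printf("last VM gone, clearing mitigation on all CPUs\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	vm_init();
	vm_destroy();
	return 0;
}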
@@ -335,6 +335,8 @@ struct svm_cpu_data {
 	u32 next_asid;
 	u32 min_asid;
 
+	bool bp_spec_reduce_set;
+
 	struct vmcb *save_area;
 	unsigned long save_area_pa;

@@ -1584,7 +1584,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
 	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
-	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
 
 static u64 kvm_get_arch_capabilities(void)
 {

@@ -1618,6 +1618,8 @@ static u64 kvm_get_arch_capabilities(void)
 		data |= ARCH_CAP_MDS_NO;
 	if (!boot_cpu_has_bug(X86_BUG_RFDS))
 		data |= ARCH_CAP_RFDS_NO;
+	if (!boot_cpu_has_bug(X86_BUG_ITS))
+		data |= ARCH_CAP_ITS_NO;
 
 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
 		/*

@@ -4597,7 +4599,7 @@ static bool kvm_is_vm_type_supported(unsigned long type)
 	return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
 }
 
-static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+static inline u64 kvm_sync_valid_fields(struct kvm *kvm)
 {
 	return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
 }

@@ -11493,7 +11495,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_queued_exception *ex = &vcpu->arch.exception;
 	struct kvm_run *kvm_run = vcpu->run;
-	u32 sync_valid_fields;
+	u64 sync_valid_fields;
 	int r;
 
 	r = kvm_mmu_post_init_vm(vcpu->kvm);

@@ -367,6 +367,54 @@ SYM_FUNC_END(call_depth_return_thunk)
 
 #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
 
+#ifdef CONFIG_MITIGATION_ITS
+
+.macro ITS_THUNK reg
+
+/*
+ * If CFI paranoid is used then the ITS thunk starts with opcodes (0xea; jne 1b)
+ * that complete the fineibt_paranoid caller sequence.
+ */
+1:	.byte 0xea
+SYM_INNER_LABEL(__x86_indirect_paranoid_thunk_\reg, SYM_L_GLOBAL)
+	UNWIND_HINT_UNDEFINED
+	ANNOTATE_NOENDBR
+	jne 1b
+SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
+	UNWIND_HINT_UNDEFINED
+	ANNOTATE_NOENDBR
+	ANNOTATE_RETPOLINE_SAFE
+	jmp *%\reg
+	int3
+	.align 32, 0xcc		/* fill to the end of the line */
+	.skip 32 - (__x86_indirect_its_thunk_\reg - 1b), 0xcc /* skip to the next upper half */
+.endm
+
+/* ITS mitigation requires thunks be aligned to upper half of cacheline */
+.align 64, 0xcc
+.skip 29, 0xcc
+
+#define GEN(reg) ITS_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+	.align 64, 0xcc
+SYM_FUNC_ALIAS(__x86_indirect_its_thunk_array, __x86_indirect_its_thunk_rax)
+SYM_CODE_END(__x86_indirect_its_thunk_array)
+
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
+
 /*
  * This function name is magical and is used by -mfunction-return=thunk-extern
  * for the compiler to generate JMPs to it.

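The alignment directives above place each ITS thunk in the upper half (byte offset 32 and up) of its own 64-byte cacheline, which is exactly what the linker ASSERT()s earlier in this diff verify. A quick C illustration of those two checks, using made-up example addresses:

#include <stdint.h>
#include <stdio.h>

/* Equivalent to the "addr & 0x20" assert for 32-byte-aligned thunks. */
static int in_upper_half(uint64_t addr)
{
	return (addr & 0x3f) >= 0x20;
}

int main(void)
{
	uint64_t thunk_rax = 0xffffffff81c002a0ULL;	/* example: offset 0x20 */
	uint64_t thunk_rcx = thunk_rax + 64;		/* next thunk, one line later */

	printf("rax in upper half of cacheline: %d\n", in_upper_half(thunk_rax));
	printf("thunks exactly one cacheline apart: %d\n",
	       (int)(((thunk_rcx - thunk_rax) % 64) == 0));
	return 0;
}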
@@ -30,6 +30,7 @@
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
+#include <linux/execmem.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>

@@ -755,6 +756,8 @@ void mark_rodata_ro(void)
 	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
+	execmem_cache_make_ro();
+
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG

@@ -34,6 +34,7 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
+#include <linux/execmem.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>

@@ -1391,6 +1392,8 @@ void mark_rodata_ro(void)
 		(end - start) >> 10);
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
+	execmem_cache_make_ro();
+
 	kernel_set_to_readonly = 1;
 
 	/*

@@ -899,8 +899,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		cond_mitigation(tsk);
 
 		/*
-		 * Let nmi_uaccess_okay() and finish_asid_transition()
-		 * know that CR3 is changing.
+		 * Indicate that CR3 is about to change. nmi_uaccess_okay()
+		 * and others are sensitive to the window where mm_cpumask(),
+		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
 		 */
 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
 		barrier();

@@ -1204,8 +1205,16 @@ done:
 
 static bool should_flush_tlb(int cpu, void *data)
 {
+	struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
 	struct flush_tlb_info *info = data;
 
+	/*
+	 * Order the 'loaded_mm' and 'is_lazy' against their
+	 * write ordering in switch_mm_irqs_off(). Ensure
+	 * 'is_lazy' is at least as new as 'loaded_mm'.
+	 */
+	smp_rmb();
+
 	/* Lazy TLB will get flushed at the next context switch. */
 	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
 		return false;

@@ -1214,8 +1223,15 @@ static bool should_flush_tlb(int cpu, void *data)
 	if (!info->mm)
 		return true;
 
+	/*
+	 * While switching, the remote CPU could have state from
+	 * either the prev or next mm. Assume the worst and flush.
+	 */
+	if (loaded_mm == LOADED_MM_SWITCHING)
+		return true;
+
 	/* The target mm is loaded, and the CPU is not lazy. */
-	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+	if (loaded_mm == info->mm)
 		return true;
 
 	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */

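A minimal C11 sketch of the ordering being relied on above: the switch path publishes loaded_mm = LOADED_MM_SWITCHING before touching is_lazy, and the flush side reads loaded_mm first, then issues a read fence (the smp_rmb() analogue) before reading is_lazy, so is_lazy is at least as new as loaded_mm. Illustrative stand-ins only, not the kernel's per-CPU machinery:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOADED_MM_SWITCHING ((void *)1)

static _Atomic(void *) loaded_mm;
static atomic_bool is_lazy;

static void switch_side(void *next_mm)
{
	/* Publish "in transition" before the rest of the switch. */
	atomic_store_explicit(&loaded_mm, LOADED_MM_SWITCHING, memory_order_release);
	atomic_store_explicit(&is_lazy, false, memory_order_release);
	atomic_store_explicit(&loaded_mm, next_mm, memory_order_release);
}

static bool flush_side(void *target_mm)
{
	void *mm = atomic_load_explicit(&loaded_mm, memory_order_acquire);

	atomic_thread_fence(memory_order_acquire);	/* the smp_rmb() analogue */

	if (atomic_load_explicit(&is_lazy, memory_order_relaxed))
		return false;	/* lazy CPUs flush at the next switch */
	if (mm == LOADED_MM_SWITCHING)
		return true;	/* could hold prev or next state: flush */
	return mm == target_mm;
}

int main(void)
{
	int fake_mm;

	switch_side(&fake_mm);
	printf("flush? %d\n", flush_side(&fake_mm));
	return 0;
}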
@@ -41,6 +41,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
+#define EMIT5(b1, b2, b3, b4, b5) \
+	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
 
 #define EMIT1_off32(b1, off) \
 	do { EMIT1(b1); EMIT(off, 4); } while (0)

@@ -661,7 +663,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 {
 	u8 *prog = *pprog;
 
-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
+		OPTIMIZER_HIDE_VAR(reg);
+		emit_jump(&prog, its_static_thunk(reg), ip);
+	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 		EMIT_LFENCE();
 		EMIT2(0xFF, 0xE0 + reg);
 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {

@@ -683,7 +688,7 @@ static void emit_return(u8 **pprog, u8 *ip)
 {
 	u8 *prog = *pprog;
 
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+	if (cpu_wants_rethunk()) {
 		emit_jump(&prog, x86_return_thunk, ip);
 	} else {
 		EMIT1(0xC3);		/* ret */

@@ -1502,6 +1507,48 @@ static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
 #define PRIV_STACK_GUARD_SZ 8
 #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
 
+static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
+				    struct bpf_prog *bpf_prog)
+{
+	u8 *prog = *pprog;
+	u8 *func;
+
+	if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
+		/* The clearing sequence clobbers eax and ecx. */
+		EMIT1(0x50); /* push rax */
+		EMIT1(0x51); /* push rcx */
+		ip += 2;
+
+		func = (u8 *)clear_bhb_loop;
+		ip += x86_call_depth_emit_accounting(&prog, func, ip);
+
+		if (emit_call(&prog, func, ip))
+			return -EINVAL;
+		EMIT1(0x59); /* pop rcx */
+		EMIT1(0x58); /* pop rax */
+	}
+	/* Insert IBHF instruction */
+	if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
+	     cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
+	    cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
+		/*
+		 * Add an Indirect Branch History Fence (IBHF). IBHF acts as a
+		 * fence preventing branch history from before the fence from
+		 * affecting indirect branches after the fence. This is
+		 * specifically used in cBPF jitted code to prevent Intra-mode
+		 * BHI attacks. The IBHF instruction is designed to be a NOP on
+		 * hardware that doesn't need or support it. The REP and REX.W
+		 * prefixes are required by the microcode, and they also ensure
+		 * that the NOP is unlikely to be used in existing code.
+		 *
+		 * IBHF is not a valid instruction in 32-bit mode.
+		 */
+		EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
+	}
+	*pprog = prog;
+	return 0;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
 		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
 {

@@ -2544,6 +2591,13 @@ emit_jmp:
 			seen_exit = true;
 			/* Update cleanup_addr */
 			ctx->cleanup_addr = proglen;
+			if (bpf_prog_was_classic(bpf_prog) &&
+			    !capable(CAP_SYS_ADMIN)) {
+				u8 *ip = image + addrs[i - 1];
+
+				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
+					return -EINVAL;
+			}
 			if (bpf_prog->aux->exception_boundary) {
 				pop_callee_regs(&prog, all_callee_regs_used);
 				pop_r12(&prog);

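For context on the EMIT5() addition above: the JIT builds machine code by appending opcode bytes to an instruction buffer, and the new macro exists so the five-byte IBHF sequence (rep prefix + REX.W + 0f 1e f8) can be emitted in one statement. A stand-alone sketch of that byte-emission pattern, not the kernel macro itself:

#include <stdio.h>
#include <stdint.h>

static uint8_t image[64];
static unsigned int len;

/* Append one opcode byte to the instruction stream. */
static void emit1(uint8_t b)
{
	image[len++] = b;
}

static void emit5(uint8_t b1, uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5)
{
	emit1(b1); emit1(b2); emit1(b3); emit1(b4); emit1(b5);
}

int main(void)
{
	emit5(0xF3, 0x48, 0x0F, 0x1E, 0xF8);	/* ibhf; a NOP on older CPUs */

	for (unsigned int i = 0; i < len; i++)
		printf("%02x ", image[i]);
	printf("\n");
	return 0;
}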
@@ -480,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq,
 	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
 	 * lookup the zone write plug.
 	 */
-	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
+	if (req_op(rq) == REQ_OP_ZONE_APPEND ||
+	    bio_flagged(bio, BIO_EMULATES_ZONE_APPEND))
 		bio->bi_iter.bi_sector = rq->__sector;
 }
 void blk_zone_write_plug_bio_endio(struct bio *bio);

@@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio)
 		 */
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
 			return -EPERM;
-		fallthrough;
-		/* rt has prio field too */
-	case IOPRIO_CLASS_BE:
-		if (level >= IOPRIO_NR_LEVELS)
-			return -EINVAL;
 		break;
+	case IOPRIO_CLASS_BE:
 	case IOPRIO_CLASS_IDLE:
 		break;
 	case IOPRIO_CLASS_NONE:

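From the caller's side, the class/level split being validated above is packed into a single integer: the class sits in the top bits and the level below it. A hedged user-space example using the uapi helpers (requires a reasonably recent linux/ioprio.h):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioprio.h>

int main(void)
{
	/* Best-effort class, level 4; IOPRIO_CLASS_RT would need CAP_SYS_NICE. */
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio))
		perror("ioprio_set");
	else
		printf("ioprio set: class %d, level %d\n",
		       IOPRIO_PRIO_CLASS(prio), IOPRIO_PRIO_DATA(prio));
	return 0;
}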
@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
 		else
 			vdev->timeout.autosuspend = 100;
 		vdev->timeout.d0i3_entry_msg = 5;
-		vdev->timeout.state_dump_msg = 10;
+		vdev->timeout.state_dump_msg = 100;
 	}
 }

@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
 err_erase_xa:
 	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
 err_unlock:
-	mutex_unlock(&vdev->submitted_jobs_lock);
 	mutex_unlock(&file_priv->lock);
+	mutex_unlock(&vdev->submitted_jobs_lock);
 	ivpu_rpm_put(vdev);
 	return ret;
 }

@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
 int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct ivpu_file_priv *file_priv = file->driver_priv;
+	struct ivpu_device *vdev = file_priv->vdev;
 	struct drm_ivpu_cmdq_create *args = data;
 	struct ivpu_cmdq *cmdq;
+	int ret;
 
-	if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
 		return -ENODEV;
 
 	if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
 		return -EINVAL;
 
+	ret = ivpu_rpm_get(vdev);
+	if (ret < 0)
+		return ret;
+
 	mutex_lock(&file_priv->lock);
 
 	cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);

@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
 	mutex_unlock(&file_priv->lock);
 
+	ivpu_rpm_put(vdev);
+
 	return cmdq ? 0 : -ENOMEM;
 }

@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct drm_ivpu_cmdq_destroy *args = data;
 	struct ivpu_cmdq *cmdq;
-	u32 cmdq_id;
+	u32 cmdq_id = 0;
 	int ret;
 
 	if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
 		return -ENODEV;
 
+	ret = ivpu_rpm_get(vdev);
+	if (ret < 0)
+		return ret;
+
 	mutex_lock(&file_priv->lock);
 
 	cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
 	if (!cmdq || cmdq->is_legacy) {
 		ret = -ENOENT;
-		goto err_unlock;
+	} else {
+		cmdq_id = cmdq->id;
+		ivpu_cmdq_destroy(file_priv, cmdq);
+		ret = 0;
 	}
 
-	cmdq_id = cmdq->id;
-	ivpu_cmdq_destroy(file_priv, cmdq);
 	mutex_unlock(&file_priv->lock);
-	ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
-	return 0;
 
-err_unlock:
-	mutex_unlock(&file_priv->lock);
+	/* Abort any pending jobs only if cmdq was destroyed */
+	if (!ret)
+		ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+	ivpu_rpm_put(vdev);
+
 	return ret;
 }

@@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
 CPU_SHOW_VULN_FALLBACK(gds);
 CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);

@@ -616,6 +617,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
 static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,

@@ -633,6 +635,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_gather_data_sampling.attr,
 	&dev_attr_reg_file_data_sampling.attr,
 	&dev_attr_ghostwrite.attr,
+	&dev_attr_indirect_target_selection.attr,
 	NULL
 };

@@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev)
 
 static int platform_dma_configure(struct device *dev)
 {
-	struct platform_driver *drv = to_platform_driver(dev->driver);
+	struct device_driver *drv = READ_ONCE(dev->driver);
 	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	enum dev_dma_attr attr;
 	int ret = 0;

@@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev)
 		attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
 		ret = acpi_dma_configure(dev, attr);
 	}
-	/* @drv may not be valid when we're called from the IOMMU layer */
-	if (ret || !dev->driver || drv->driver_managed_dma)
+	/* @dev->driver may not be valid when we're called from the IOMMU layer */
+	if (ret || !drv || to_platform_driver(drv)->driver_managed_dma)
 		return ret;
 
 	ret = iommu_device_use_default_domain(dev);

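The pattern behind the change above: when a pointer can be cleared concurrently, read it once into a local and test that local, instead of testing the shared pointer and then dereferencing it again. A C11 sketch with a stand-in struct; atomic_load here plays the role the kernel's READ_ONCE() plays in the diff:

#include <stdatomic.h>
#include <stdio.h>

struct driver { int driver_managed_dma; };

static _Atomic(struct driver *) dev_driver;

static int dma_configure(void)
{
	/* One snapshot; the racy version read dev_driver twice. */
	struct driver *drv = atomic_load_explicit(&dev_driver, memory_order_relaxed);

	if (!drv || drv->driver_managed_dma)
		return 0;	/* skip claiming the default IOMMU domain */

	printf("claiming default IOMMU domain\n");
	return 1;
}

int main(void)
{
	static struct driver d = { .driver_managed_dma = 0 };

	atomic_store(&dev_driver, &d);
	return !dma_configure();
}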
@@ -505,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
 	lo->lo_min_dio_size = loop_query_min_dio_size(lo);
 }
 
+static int loop_check_backing_file(struct file *file)
+{
+	if (!file->f_op->read_iter)
+		return -EINVAL;
+
+	if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+		return -EINVAL;
+
+	return 0;
+}
+
 /*
  * loop_change_fd switched the backing store of a loopback device to
  * a new file. This is useful for operating system installers to free up

@@ -526,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	if (!file)
 		return -EBADF;
 
+	error = loop_check_backing_file(file);
+	if (error)
+		return error;
+
 	/* suppress uevents while reconfiguring the device */
 	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);

@@ -963,6 +978,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
 	if (!file)
 		return -EBADF;
 
+	if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter)
+		return -EINVAL;
+
+	error = loop_check_backing_file(file);
+	if (error)
+		return error;
+
 	is_loop = is_loop_device(file);
 
 	/* This is safe, since we have a reference from open(). */

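What the new loop_check_backing_file() means from userspace: attaching a backing file whose filesystem lacks read_iter/write_iter now fails with EINVAL at configure time rather than misbehaving later. A hedged example exercising that path via LOOP_CONFIGURE (kernel 5.8+); the device and file paths are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int loop_fd = open("/dev/loop0", O_RDWR);
	int file_fd = open("/tmp/backing.img", O_RDWR);
	struct loop_config cfg;

	if (loop_fd < 0 || file_fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = file_fd;

	/* Fails with EINVAL if the backing file has no read_iter/write_iter. */
	if (ioctl(loop_fd, LOOP_CONFIGURE, &cfg))
		perror("LOOP_CONFIGURE");
	else
		puts("loop device configured");

	close(file_fd);
	close(loop_fd);
	return 0;
}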